| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
SmartPeople/zulip
|
refs/heads/master
|
zerver/migrations/0045_realm_waiting_period_threshold.py
|
33
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0044_reaction'),
]
operations = [
migrations.AddField(
model_name='realm',
name='waiting_period_threshold',
field=models.PositiveIntegerField(default=0),
),
]
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/hybridkubernetes/azure-mgmt-hybridkubernetes/azure/mgmt/hybridkubernetes/aio/operations/__init__.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._connected_cluster_operations import ConnectedClusterOperations
from ._operations import Operations
__all__ = [
'ConnectedClusterOperations',
'Operations',
]
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/scipy/ndimage/__init__.py
|
81
|
"""
=========================================================
Multi-dimensional image processing (:mod:`scipy.ndimage`)
=========================================================
.. currentmodule:: scipy.ndimage
This package contains various functions for multi-dimensional image
processing.
Filters
=======
.. autosummary::
:toctree: generated/
convolve - Multi-dimensional convolution
convolve1d - 1-D convolution along the given axis
correlate - Multi-dimensional correlation
correlate1d - 1-D correlation along the given axis
gaussian_filter
gaussian_filter1d
gaussian_gradient_magnitude
gaussian_laplace
generic_filter - Multi-dimensional filter using a given function
generic_filter1d - 1-D generic filter along the given axis
generic_gradient_magnitude
generic_laplace
laplace - n-D Laplace filter based on approximate second derivatives
maximum_filter
maximum_filter1d
median_filter - Calculates a multi-dimensional median filter
minimum_filter
minimum_filter1d
percentile_filter - Calculates a multi-dimensional percentile filter
prewitt
rank_filter - Calculates a multi-dimensional rank filter
sobel
uniform_filter - Multi-dimensional uniform filter
uniform_filter1d - 1-D uniform filter along the given axis
Fourier filters
===============
.. autosummary::
:toctree: generated/
fourier_ellipsoid
fourier_gaussian
fourier_shift
fourier_uniform
Interpolation
=============
.. autosummary::
:toctree: generated/
affine_transform - Apply an affine transformation
geometric_transform - Apply an arbitrary geometric transform
map_coordinates - Map input array to new coordinates by interpolation
rotate - Rotate an array
shift - Shift an array
spline_filter
spline_filter1d
zoom - Zoom an array
Measurements
============
.. autosummary::
:toctree: generated/
center_of_mass - The center of mass of the values of an array at labels
extrema - Min's and max's of an array at labels, with their positions
find_objects - Find objects in a labeled array
histogram - Histogram of the values of an array, optionally at labels
label - Label features in an array
labeled_comprehension
maximum
maximum_position
mean - Mean of the values of an array at labels
median
minimum
minimum_position
standard_deviation - Standard deviation of an n-D image array
sum - Sum of the values of the array
variance - Variance of the values of an n-D image array
watershed_ift
Morphology
==========
.. autosummary::
:toctree: generated/
binary_closing
binary_dilation
binary_erosion
binary_fill_holes
binary_hit_or_miss
binary_opening
binary_propagation
black_tophat
distance_transform_bf
distance_transform_cdt
distance_transform_edt
generate_binary_structure
grey_closing
grey_dilation
grey_erosion
grey_opening
iterate_structure
morphological_gradient
morphological_laplace
white_tophat
Utility
=======
.. autosummary::
:toctree: generated/
imread - Load an image from a file
"""
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
from .filters import *
from .fourier import *
from .interpolation import *
from .measurements import *
from .morphology import *
from .io import *
__version__ = '2.0'
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
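# A hedged usage sketch (not part of the original scipy file) tying together a filter,
# a labeling call and a measurement from the docstring above; the toy array is purely
# illustrative.
if __name__ == "__main__":
    import numpy as np
    demo = np.zeros((32, 32))
    demo[8:16, 8:16] = 1.0                       # a single bright square
    smoothed = gaussian_filter(demo, sigma=2)    # from .filters
    labeled, count = label(smoothed > 0.1)       # from .measurements
    print(center_of_mass(smoothed, labeled, range(1, count + 1)))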
|
Batterfii/django
|
refs/heads/master
|
tests/migrations/test_migrations_no_ancestor/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
fingi/csipsimple
|
refs/heads/master
|
jni/pjsip/sources/tests/pjsua/scripts-sendto/110_tel_uri.py
|
59
|
# $Id: 110_tel_uri.py 2451 2009-02-13 10:13:08Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# Handling of incoming tel: URI.
complete_msg = \
"""INVITE tel:+2065551212 SIP/2.0
Via: SIP/2.0/UDP $LOCAL_IP:$LOCAL_PORT;rport;x-route-tag="tgrp:cococisco1";branch=z9hG4bK61E05
From: <tel:12345>$FROM_TAG
To: <tel:+2065551212>
Date: Thu, 12 Feb 2009 18:32:33 GMT
Call-ID: 58F8F7D6-F86A11DD-8013D591-5694EF79
Supported: 100rel,timer,resource-priority
Min-SE: 86400
Cisco-Guid: 1492551325-4167700957-2148586897-1452601209
User-Agent: Cisco-SIPGateway/IOS-12.x
Allow: INVITE, OPTIONS, BYE, CANCEL, ACK, PRACK, UPDATE, REFER, SUBSCRIBE, NOTIFY, INFO, REGISTER
CSeq: 101 INVITE
Max-Forwards: 70
Timestamp: 1234463553
Contact: <tel:+1234;ext=1>
Contact: <sip:tester@$LOCAL_IP:$LOCAL_PORT>
Record-Route: <sip:tester@$LOCAL_IP:$LOCAL_PORT;lr>
Expires: 180
Allow-Events: telephone-event
Content-Type: application/sdp
Content-Disposition: session;handling=required
Content-Length: 265
v=0
o=CiscoSystemsSIP-GW-UserAgent 1296 9529 IN IP4 X.X.X.X
s=SIP Call
c=IN IP4 $LOCAL_IP
t=0 0
m=audio 18676 RTP/AVP 0 101 19
c=IN IP4 $LOCAL_IP
a=rtpmap:0 PCMU/8000
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-16
a=rtpmap:19 CN/8000
a=ptime:20
"""
sendto_cfg = sip.SendtoCfg( "tel: URI", "--null-audio --auto-answer 200",
"", 200, complete_msg=complete_msg)
|
rosmo/boto
|
refs/heads/develop
|
boto/sqs/attributes.py
|
223
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Attribute Name/Value set
"""
class Attributes(dict):
def __init__(self, parent):
self.parent = parent
self.current_key = None
self.current_value = None
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Attribute':
self[self.current_key] = self.current_value
elif name == 'Name':
self.current_key = value
elif name == 'Value':
self.current_value = value
else:
setattr(self, name, value)
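# A hedged usage sketch (not part of the original boto file): the XML parser feeds
# endElement() one closing tag at a time, so a Name/Value pair followed by the
# enclosing Attribute element becomes a single dict entry.
if __name__ == '__main__':
    attrs = Attributes(parent=None)
    attrs.endElement('Name', 'VisibilityTimeout', None)
    attrs.endElement('Value', '30', None)
    attrs.endElement('Attribute', '', None)
    print(attrs)  # -> {'VisibilityTimeout': '30'}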
|
christopher-henderson/Experiments
|
refs/heads/master
|
structures/BinaryTree/binaryTree.py
|
2
|
from functools import wraps
from BinaryTreeExceptions import *
from Node import Node
def NotEmpty(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
if self.isEmpty():
raise EmptyTree()
else:
return function(self, *args, **kwargs)
return wrapper
class BinaryTree(object):
def __init__(self, *args):
self.root = None
if len(args) == 0:
#===================================================================
# Default, empty, constructor.
# >>> tree = BinaryTree()
#===================================================================
pass
elif isinstance(args[0], Node):
#===================================================================
# Use the given node as the root of this tree.
#===================================================================
self.root = args[0]
elif '__iter__' in dir(args[0]):
#===================================================================
# Construct the binary tree using the given iterable.
# >>> evens = BinaryTree(number for number in range(101) if number % 2 == 0)
#===================================================================
for element in args[0]:
self.insert(element)
else:
#===================================================================
# Construct the binary tree using all given elements.
# >>> random = BinaryTree(56,7,2,5,8,23)
#===================================================================
for element in args:
self.insert(element)
def __contains__(self, element):
return element in self.root
def __str__(self):
return str(self.inOrder())
def isEmpty(self):
return self.root is None
def insert(self, element):
if self.isEmpty():
self.root = Node(element)
else:
self.root.insert(element)
return self
def inOrder(self):
return tuple(item for item in self.root.inOrder())
def preOrder(self):
return tuple(item for item in self.root.preOrder())
def postOrder(self):
return tuple(item for item in self.root.postOrder())
@NotEmpty
def descendantsOf(self, element):
return self.root.descendants(element)
@NotEmpty
def ancestorsOf(self, element):
return tuple(ancestor for ancestor in self.root.ancestors(element))
@NotEmpty
def isAncestorOf(self, targetAncestor, targetDescendant):
return self.root.isAncestorOf(targetAncestor, targetDescendant)
@NotEmpty
def isDescendantOf(self, targetDescendant, targetAncestor):
return self.root.isAncestorOf(targetAncestor, targetDescendant)
@NotEmpty
def min(self):
return self.root.min()
@NotEmpty
def max(self):
return self.root.max()
@NotEmpty
def root(self):
return self.root.element
@NotEmpty
def detachAt(self, element):
return BinaryTree(self.root.detachAt(element))
@NotEmpty
def levelOf(self, element):
return self.root.levelOf(element)
@NotEmpty
def height(self):
return max(self.root.height())
def attach(self, tree):
if not isinstance(tree, BinaryTree):
raise TypeError('Expected a BinaryTree. Received a {CLASS}'.format(CLASS=tree.__class__))
if self.root is None:
self.root = tree.root
else:
self.root.attach(tree.root)
return self
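# A hedged usage sketch (not part of the original file); it assumes the companion Node
# class orders elements the way the constructor docstrings above suggest, so that an
# in-order walk yields sorted values.
if __name__ == '__main__':
    tree = BinaryTree(56, 7, 2, 5, 8, 23)
    print(tree.inOrder())   # expected to print the elements in sorted order
    print(8 in tree)        # __contains__ delegates membership to the root Node
    print(tree.isEmpty())   # False once elements have been inserted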
|
frdb194/django
|
refs/heads/master
|
tests/test_discovery_sample/tests/tests.py
|
641
|
from unittest import TestCase
class Test(TestCase):
def test_sample(self):
pass
|
roryk/tiny-test-data
|
refs/heads/master
|
scripts/fastq_convert.py
|
2
|
from argparse import ArgumentParser
import sys
from Bio import SeqIO
QUALITY_CONVERSION_TYPES = ["fastq-sanger", "fastq-solexa", "fastq-illumina"]
def main(in_file, in_format, out_format):
with open(in_file, "r") as in_handle:
SeqIO.convert(in_handle, in_format, sys.stdout, out_format)
if __name__ == "__main__":
parser = ArgumentParser(description="simple script for converting "
"fastq quality scores.")
parser.add_argument("--in-format", help="Quality format to convert from.",
choices=QUALITY_CONVERSION_TYPES)
parser.add_argument("--out-format", help="Quality format to convert to.",
choices=QUALITY_CONVERSION_TYPES)
parser.add_argument("in_file", help="FASTQ file to convert.")
args = parser.parse_args()
main(args.in_file, args.in_format, args.out_format)
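# Example invocation (illustrative; the file name "example.fq" is hypothetical):
#   python fastq_convert.py --in-format fastq-illumina --out-format fastq-sanger example.fq > example.sanger.fq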
|
cdondrup/teaching
|
refs/heads/indigo-devel
|
pyperplan/src/search/__init__.py
|
2
|
#
# This file is part of pyperplan.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
from .a_star import astar_search, weighted_astar_search, greedy_best_first_search
from .breadth_first_search import breadth_first_search
from .enforced_hillclimbing_search import enforced_hillclimbing_search
from .iterative_deepening_search import iterative_deepening_search
from .sat import sat_solve
from .searchspace import make_root_node, make_child_node
|
hoangt/u-boot
|
refs/heads/openrisc
|
tools/buildman/test.py
|
31
|
#
# Copyright (c) 2012 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import shutil
import sys
import tempfile
import time
import unittest
# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(our_path, '../patman'))
import board
import bsettings
import builder
import control
import command
import commit
import toolchain
errors = [
'''main.c: In function 'main_loop':
main.c:260:6: warning: unused variable 'joe' [-Wunused-variable]
''',
'''main.c: In function 'main_loop':
main.c:295:2: error: 'fred' undeclared (first use in this function)
main.c:295:2: note: each undeclared identifier is reported only once for each function it appears in
make[1]: *** [main.o] Error 1
make: *** [common/libcommon.o] Error 2
Make failed
''',
'''main.c: In function 'main_loop':
main.c:280:6: warning: unused variable 'mary' [-Wunused-variable]
''',
'''powerpc-linux-ld: warning: dot moved backwards before `.bss'
powerpc-linux-ld: warning: dot moved backwards before `.bss'
powerpc-linux-ld: u-boot: section .text lma 0xfffc0000 overlaps previous sections
powerpc-linux-ld: u-boot: section .rodata lma 0xfffef3ec overlaps previous sections
powerpc-linux-ld: u-boot: section .reloc lma 0xffffa400 overlaps previous sections
powerpc-linux-ld: u-boot: section .data lma 0xffffcd38 overlaps previous sections
powerpc-linux-ld: u-boot: section .u_boot_cmd lma 0xffffeb40 overlaps previous sections
powerpc-linux-ld: u-boot: section .bootpg lma 0xfffff198 overlaps previous sections
'''
]
# hash, subject, return code, list of errors/warnings
commits = [
['1234', 'upstream/master, ok', 0, []],
['5678', 'Second commit, a warning', 0, errors[0:1]],
['9012', 'Third commit, error', 1, errors[0:2]],
['3456', 'Fourth commit, warning', 0, [errors[0], errors[2]]],
['7890', 'Fifth commit, link errors', 1, [errors[0], errors[3]]],
['abcd', 'Sixth commit, fixes all errors', 0, []]
]
boards = [
['board0', 'arm', 'armv7', 'ARM Board 1', 'Tester', '', ''],
['board1', 'arm', 'armv7', 'ARM Board 2', 'Tester', '', ''],
['board2', 'powerpc', 'powerpc', 'PowerPC board 1', 'Tester', '', ''],
['board3', 'powerpc', 'mpc5xx', 'PowerPC board 2', 'Tester', '', ''],
['board4', 'sandbox', 'sandbox', 'Sandbox board', 'Tester', '', '']
]
class Options:
"""Class that holds build options"""
pass
class TestBuild(unittest.TestCase):
"""Test buildman
TODO: Write tests for the rest of the functionality
"""
def setUp(self):
# Set up commits to build
self.commits = []
sequence = 0
for commit_info in commits:
comm = commit.Commit(commit_info[0])
comm.subject = commit_info[1]
comm.return_code = commit_info[2]
comm.error_list = commit_info[3]
comm.sequence = sequence
sequence += 1
self.commits.append(comm)
# Set up boards to build
self.boards = board.Boards()
for brd in boards:
self.boards.AddBoard(board.Board(*brd))
self.boards.SelectBoards([])
# Set up the toolchains
bsettings.Setup()
self.toolchains = toolchain.Toolchains()
self.toolchains.Add('arm-linux-gcc', test=False)
self.toolchains.Add('sparc-linux-gcc', test=False)
self.toolchains.Add('powerpc-linux-gcc', test=False)
self.toolchains.Add('gcc', test=False)
def Make(self, commit, brd, stage, *args, **kwargs):
result = command.CommandResult()
boardnum = int(brd.target[-1])
result.return_code = 0
result.stderr = ''
result.stdout = ('This is the test output for board %s, commit %s' %
(brd.target, commit.hash))
if boardnum >= 1 and boardnum >= commit.sequence:
result.return_code = commit.return_code
result.stderr = ''.join(commit.error_list)
if stage == 'build':
target_dir = None
for arg in args:
if arg.startswith('O='):
target_dir = arg[2:]
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
#time.sleep(.2 + boardnum * .2)
result.combined = result.stdout + result.stderr
return result
def testBasic(self):
"""Test basic builder operation"""
output_dir = tempfile.mkdtemp()
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
build = builder.Builder(self.toolchains, output_dir, None, 1, 2,
checkout=False, show_unknown=False)
build.do_make = self.Make
board_selected = self.boards.GetSelectedDict()
#build.BuildCommits(self.commits, board_selected, False)
build.BuildBoards(self.commits, board_selected, False, False)
build.ShowSummary(self.commits, board_selected, True, False,
False, False)
def _testGit(self):
"""Test basic builder operation by building a branch"""
base_dir = tempfile.mkdtemp()
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
options = Options()
options.git = os.getcwd()
options.summary = False
options.jobs = None
options.dry_run = False
#options.git = os.path.join(base_dir, 'repo')
options.branch = 'test-buildman'
options.force_build = False
options.list_tool_chains = False
options.count = -1
options.git_dir = None
options.threads = None
options.show_unknown = False
options.quick = False
options.show_errors = False
options.keep_outputs = False
args = ['tegra20']
control.DoBuildman(options, args)
if __name__ == "__main__":
unittest.main()
|
cs-au-dk/Artemis
|
refs/heads/master
|
contrib/ajaxinterface/requestpatterns/__init__.py
|
1
|
"""
Copyright 2013 Aarhus University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
from pprint import pprint
from ajaxinterface.ail.data import AilLine
from data import RequestPattern, get_change_value
def get_value_from_rrpair(rrpair, feature):
args, kwargs = rrpair.request.features
if feature['action'] == 'args':
value = args[feature['index']]
elif feature['action'] == 'kwargs':
value = kwargs[feature['index']]
else:
raise Exception("Unknown action")
if hasattr(value, '__iter__'):
value = ','.join(value)
return value
def _get_feature_cost(lines, features):
observed_values = set()
split_cost = 0
merge_cost = 0
for line in lines:
values = set()
for rrpair in line.sample_rrpairs:
value = [get_value_from_rrpair(rrpair, feature) \
for feature in features]
values.add(tuple(value))
split_cost += len(values) - 1 # split x-1 times for x elements
merge_cost += len(observed_values.intersection(values))
observed_values = observed_values.union(values)
cost = split_cost + merge_cost
return cost, observed_values
def _do_split(lines, features):
if features is None:
features = []
new_lines = {}
for line in lines:
for rrpair in line.sample_rrpairs:
value = []
for feature in features:
value.append(get_value_from_rrpair(rrpair, feature))
value = tuple(value)
if not new_lines.has_key(value):
new_lines[value] = AilLine()
new_lines[value].sample_rrpairs.append(rrpair)
new_lines[value].response_types.update(line.response_types)
for value,line in new_lines.items():
line.request_pattern = RequestPattern(line.sample_rrpairs[0].request)
for feature in features:
line.request_pattern.tighten_feature(feature)
return new_lines.values()
def is_constant_feature(lines, feature):
"""
Constant features: a feature is constant if it always
associates with a constant value. Don't generate permutations
without these.
"""
observed_value = None
for line in lines:
for rrpair in line.sample_rrpairs:
value = get_value_from_rrpair(rrpair, feature)
if observed_value is None:
observed_value = value
elif observed_value != value:
return False
return True
def split_or_merge_lines(lines):
"""
Request pattern clustering
Conducts splitting and merging on the given lines in order to construct a
new set of lines in accordance with request pattern clustering.
Returns [AilLine]
"""
assert(len(lines) >= 2)
#print 'Fixing conflict for %s lines' % len(lines) # debug
features = []
constant_features = []
feature_sets = []
for feature in lines[0].request_pattern.features:
if is_constant_feature(lines, feature):
constant_features.append(feature)
else:
features.append(feature)
for i in xrange(len(features)):
for selected_features in itertools.combinations(features, i+1):
l = list(selected_features)
l.extend(constant_features)
feature_sets.append(tuple(l))
min_values = None
min_feature = None
min_cost = None
for feature_set in feature_sets:
cost, value_pairs = _get_feature_cost(lines, feature_set)
if min_cost is None or \
cost < min_cost or \
(cost == min_cost and len(feature_set) > len(min_feature)):
min_cost = cost
min_feature = feature_set
min_values = value_pairs
#print min_cost, min_feature, min_values, '<-- best'
return _do_split(lines, min_feature)
def refine_ail(partial_ail_spec):
queue = []
queue.extend(partial_ail_spec.lines)
while len(queue) > 0:
line = queue.pop()
# filter empty lines
if len(line.sample_rrpairs) == 0:
partial_ail_spec.lines.remove(line)
continue
line.request_pattern = RequestPattern(line.sample_rrpairs[0].request)
# split line if the requests are disjoint
# e.g. different number of arguments or different request method
matching = []
nonmatching = []
for rrpair in line.sample_rrpairs:
if line.request_pattern.includes(rrpair.request):
matching.append(rrpair)
else:
nonmatching.append(rrpair)
if len(nonmatching) > 0:
new_line = AilLine()
new_line.response_types = line.response_types
new_line.sample_rrpairs = nonmatching
partial_ail_spec.lines.append(new_line)
queue.append(new_line)
line.sample_rrpairs = matching
conflicting_lines = {}
for line in partial_ail_spec.lines:
signature = line.request_pattern.signature
if not conflicting_lines.has_key(signature):
conflicting_lines[signature] = []
conflicting_lines[signature].append(line)
for signature,lines in conflicting_lines.items():
if len(lines) == 1:
continue
added, removed = split_or_merge_lines(lines), lines
for line in removed:
partial_ail_spec.lines.remove(line)
for line in added:
partial_ail_spec.lines.append(line)
# make it pretty
for line in partial_ail_spec.lines:
for feature in line.request_pattern.features:
values = set()
for rrpair in line.sample_rrpairs:
value = get_value_from_rrpair(rrpair, feature)
values.add(value)
if len(values) == 1:
line.request_pattern.tighten_feature(feature)
return partial_ail_spec
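# A hedged usage sketch (not part of the original file): _get_feature_cost() scores a
# candidate feature set by the splits and merges it would force. The _Stub* classes are
# hypothetical stand-ins; the real line/rrpair objects only need .sample_rrpairs and
# .request.features for this calculation.
if __name__ == '__main__':
    class _StubRequest(object):
        def __init__(self, args, kwargs):
            self.features = (args, kwargs)
    class _StubRRPair(object):
        def __init__(self, args):
            self.request = _StubRequest(args, {})
    class _StubLine(object):
        def __init__(self, rrpairs):
            self.sample_rrpairs = rrpairs
    lines = [_StubLine([_StubRRPair(['login']), _StubRRPair(['logout'])]),
             _StubLine([_StubRRPair(['login'])])]
    feature = {'action': 'args', 'index': 0}
    # one split in the first line plus one merge with the second -> cost 2
    print(_get_feature_cost(lines, [feature]))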
|
ojengwa/grr
|
refs/heads/master
|
lib/data_stores/fake_data_store_test.py
|
2
|
#!/usr/bin/env python
"""Tests the fake data store - in memory implementation."""
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
class FakeDataStoreTest(data_store_test._DataStoreTest):
"""Test the fake data store."""
def testApi(self):
"""The fake datastore doesn't strictly conform to the api but this is ok."""
class FakeDataStoreBenchmarks(data_store_test.DataStoreBenchmarks):
"""Benchmark the fake data store.
This gives an upper bound on data store performance - since the fake data
store is the most trivial data store and therefore the fastest.
"""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
igor-toga/knob2
|
refs/heads/master
|
knob/common/auth.py
|
1
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import base
from oslo_middleware import request_id
import webob.dec
import webob.exc
from knob.common import context
LOG = logging.getLogger(__name__)
class KnobKeystoneContext(base.ConfigurableMiddleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify
def __call__(self, req):
# Determine the user ID
user_id = req.headers.get('X_USER_ID','')
#if not user_id:
# LOG.debug("X_USER_ID is not found in request")
# return webob.exc.HTTPUnauthorized()
# Determine the tenant
tenant_id = req.headers.get('X_PROJECT_ID','')
# Suck out the roles
roles = [r.strip() for r in req.headers.get('X_ROLES', '').split(',')]
# Human-friendly names
tenant_name = req.headers.get('X_PROJECT_NAME', '')
user_name = req.headers.get('X_USER_NAME', '')
# Use request_id if already set
req_id = req.environ.get(request_id.ENV_REQUEST_ID)
# Get the auth token
auth_token = req.headers.get('X_AUTH_TOKEN')
# Create a context with the authentication data
ctx = context.MyRequestContext(user_id, tenant_id, roles=roles,
user_name=user_name, tenant_name=tenant_name,
request_id=req_id, auth_token=auth_token)
# Inject the context...
req.context = ctx
return self.application
"""
def pipeline_factory(loader, global_conf, **local_conf):
pipeline = local_conf[cfg.CONF.auth_strategy]
pipeline = pipeline.split()
filters = [loader.get_filter(n) for n in pipeline[:-1]]
app = loader.get_app(pipeline[-1])
filters.reverse()
for filter in filters:
app = filter(app)
return app
"""
|
Pathel/deuterium
|
refs/heads/master
|
src/comms/models.py
|
2
|
"""
Models for the comsystem. The Commsystem is intended to be
used by Players (thematic IC communication is probably
best handled by custom commands instead).
The comm system could take the form of channels, but can also
be adopted for storing tells or in-game mail.
The comsystem's main component is the Message (Msg), which
carries the actual information between two parties.
Msgs are stored in the database and usually not
deleted.
A Msg always has one sender (a user), but can have
any number of targets, both users and channels.
Channels are central objects that act as targets for
Msgs. Players can connect to channels by use of a
ChannelConnect object (this object is necessary to easily
be able to delete connections on the fly).
"""
from datetime import datetime
from django.conf import settings
from django.db import models
from src.typeclasses.models import TypedObject, TagHandler, AttributeHandler, AliasHandler
from src.utils.idmapper.models import SharedMemoryModel
from src.comms import managers
from src.comms.managers import identify_object
from src.locks.lockhandler import LockHandler
from src.utils.utils import crop, make_iter, lazy_property
__all__ = ("Msg", "TempMsg", "ChannelDB")
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
#------------------------------------------------------------
#
# Msg
#
#------------------------------------------------------------
class Msg(SharedMemoryModel):
"""
A single message. This model describes all ooc messages
sent in-game, both to channels and between players.
The Msg class defines the following properties:
sender - sender of message
receivers - list of target objects for message
channels - list of channels message was sent to
message - the text being sent
date_sent - time message was sent
hide_from_sender - bool if message should be hidden from sender
hide_from_receivers - list of receiver objects to hide message from
hide_from_channels - list of channels objects to hide message from
permissions - perm strings
"""
#
# Msg database model setup
#
#
# These database fields are all set using their corresponding properties,
# named the same as the field, but without the db_* prefix.
# Sender is either a player, an object or an external sender, like
# an IRC channel; normally there is only one, but if co-modification of
# a message is allowed, there may be more than one "author"
db_sender_players = models.ManyToManyField("players.PlayerDB", related_name='sender_player_set', null=True, verbose_name='sender(player)', db_index=True)
db_sender_objects = models.ManyToManyField("objects.ObjectDB", related_name='sender_object_set', null=True, verbose_name='sender(object)', db_index=True)
db_sender_external = models.CharField('external sender', max_length=255, null=True, db_index=True,
help_text="identifier for external sender, for example a sender over an IRC connection (i.e. someone who doesn't have an exixtence in-game).")
# The destination objects of this message. Stored as a
# comma-separated string of object dbrefs. Can be defined along
# with channels below.
db_receivers_players = models.ManyToManyField('players.PlayerDB', related_name='receiver_player_set', null=True, help_text="player receivers")
db_receivers_objects = models.ManyToManyField('objects.ObjectDB', related_name='receiver_object_set', null=True, help_text="object receivers")
db_receivers_channels = models.ManyToManyField("ChannelDB", related_name='channel_set', null=True, help_text="channel receivers")
# header could be used for meta-info about the message if your system needs
# it, or as a separate store for the mail subject line maybe.
db_header = models.TextField('header', null=True, blank=True)
# the message body itself
db_message = models.TextField('message')
# send date
db_date_sent = models.DateTimeField('date sent', editable=False, auto_now_add=True, db_index=True)
# lock storage
db_lock_storage = models.TextField('locks', blank=True,
help_text='access locks on this message.')
# these can be used to filter/hide a given message from supplied objects/players/channels
db_hide_from_players = models.ManyToManyField("players.PlayerDB", related_name='hide_from_players_set', null=True)
db_hide_from_objects = models.ManyToManyField("objects.ObjectDB", related_name='hide_from_objects_set', null=True)
db_hide_from_channels = models.ManyToManyField("ChannelDB", related_name='hide_from_channels_set', null=True)
# Database manager
objects = managers.MsgManager()
_is_deleted = False
def __init__(self, *args, **kwargs):
SharedMemoryModel.__init__(self, *args, **kwargs)
self.extra_senders = []
class Meta:
"Define Django meta options"
verbose_name = "Message"
# Wrapper properties to easily set database fields. These are
# @property decorators that allow these fields to be accessed using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# sender property (wraps db_sender_*)
#@property
def __senders_get(self):
"Getter. Allows for value = self.sender"
return [hasattr(o, "typeclass") and o.typeclass or o for o in
list(self.db_sender_players.all()) +
list(self.db_sender_objects.all()) +
self.extra_senders]
#@sender.setter
def __senders_set(self, value):
"Setter. Allows for self.sender = value"
for val in (v for v in make_iter(value) if v):
obj, typ = identify_object(val)
if typ == 'player':
self.db_sender_players.add(obj)
elif typ == 'object':
self.db_sender_objects.add(obj)
elif isinstance(typ, basestring):
self.db_sender_external = obj
elif not obj:
return
else:
raise ValueError(obj)
self.save()
#@sender.deleter
def __senders_del(self):
"Deleter. Clears all senders"
self.db_sender_players.clear()
self.db_sender_objects.clear()
self.db_sender_external = ""
self.extra_senders = []
self.save()
senders = property(__senders_get, __senders_set, __senders_del)
def remove_sender(self, value):
"Remove a single sender or a list of senders"
for val in make_iter(value):
obj, typ = identify_object(val)
if typ == 'player':
self.db_sender_players.remove(obj)
elif typ == 'object':
self.db_sender_objects.remove(obj)
elif isinstance(obj, basestring):
self.db_sender_external = obj
else:
raise ValueError(obj)
self.save()
# receivers property
#@property
def __receivers_get(self):
"""
Getter. Allows for value = self.receivers.
Returns a combined list of player and object receivers.
"""
return [hasattr(o, "typeclass") and o.typeclass or o for o in
list(self.db_receivers_players.all()) + list(self.db_receivers_objects.all())]
#@receivers.setter
def __receivers_set(self, value):
"""
Setter. Allows for self.receivers = value.
This appends a new receiver to the message.
"""
for val in (v for v in make_iter(value) if v):
obj, typ = identify_object(val)
if typ == 'player':
self.db_receivers_players.add(obj)
elif typ == 'object':
self.db_receivers_objects.add(obj)
elif not obj:
return
else:
raise ValueError
self.save()
#@receivers.deleter
def __receivers_del(self):
"Deleter. Clears all receivers"
self.db_receivers_players.clear()
self.db_receivers_objects.clear()
self.extra_senders = []
self.save()
receivers = property(__receivers_get, __receivers_set, __receivers_del)
def remove_receiver(self, obj):
"Remove a single recevier"
obj, typ = identify_object(obj)
if typ == 'player':
self.db_receivers_players.remove(obj)
elif typ == 'object':
self.db_receivers_objects.remove(obj)
else:
raise ValueError
self.save()
# channels property
#@property
def __channels_get(self):
"Getter. Allows for value = self.channels. Returns a list of channels."
return self.db_receivers_channels.all()
#@channels.setter
def __channels_set(self, value):
"""
Setter. Allows for self.channels = value.
Requires a channel to be added."""
for val in (v.dbobj for v in make_iter(value) if v):
self.db_receivers_channels.add(val)
#@channels.deleter
def __channels_del(self):
"Deleter. Allows for del self.channels"
self.db_receivers_channels.clear()
self.save()
channels = property(__channels_get, __channels_set, __channels_del)
def __hide_from_get(self):
"""
Getter. Allows for value = self.hide_from.
Returns 3 lists of players, objects and channels
"""
return self.db_hide_from_players.all(), self.db_hide_from_objects.all(), self.db_hide_from_channels.all()
#@hide_from_sender.setter
def __hide_from_set(self, value):
"Setter. Allows for self.hide_from = value. Will append to hiders"
obj, typ = identify_object(value)
if typ == "player":
self.db_hide_from_players.add(obj)
elif typ == "object":
self.db_hide_from_objects.add(obj)
elif typ == "channel":
self.db_hide_from_channels.add(obj)
else:
raise ValueError
self.save()
#@hide_from_sender.deleter
def __hide_from_del(self):
"Deleter. Allows for del self.hide_from_senders"
self.db_hide_from_players.clear()
self.db_hide_from_objects.clear()
self.db_hide_from_channels.clear()
self.save()
hide_from = property(__hide_from_get, __hide_from_set, __hide_from_del)
#
# Msg class methods
#
def __str__(self):
"This handles what is shown when e.g. printing the message"
senders = ",".join(obj.key for obj in self.senders)
receivers = ",".join(["[%s]" % obj.key for obj in self.channels] + [obj.key for obj in self.receivers])
return "%s->%s: %s" % (senders, receivers, crop(self.message, width=40))
#------------------------------------------------------------
#
# TempMsg
#
#------------------------------------------------------------
class TempMsg(object):
"""
This is a non-persistent object for sending
temporary messages that will not be stored.
It mimics the "real" Msg object, but don't require
sender to be given.
"""
def __init__(self, senders=None, receivers=None, channels=None, message="", header="", type="", lockstring="", hide_from=None):
self.senders = senders and make_iter(senders) or []
self.receivers = receivers and make_iter(receivers) or []
self.channels = channels and make_iter(channels) or []
self.type = type
self.header = header
self.message = message
self.lock_storage = lockstring
self.hide_from = hide_from and make_iter(hide_from) or []
self.date_sent = datetime.now()
@lazy_property
def locks(self):
return LockHandler(self)
def __str__(self):
"This handles what is shown when e.g. printing the message"
senders = ",".join(obj.key for obj in self.senders)
receivers = ",".join(["[%s]" % obj.key for obj in self.channels] + [obj.key for obj in self.receivers])
return "%s->%s: %s" % (senders, receivers, crop(self.message, width=40))
def remove_sender(self, obj):
"Remove a sender or a list of senders"
for o in make_iter(obj):
try:
self.senders.remove(o)
except ValueError:
pass # nothing to remove
def remove_receiver(self, obj):
"Remove a sender or a list of senders"
for o in make_iter(obj):
try:
self.senders.remove(o)
except ValueError:
pass # nothing to remove
def access(self, accessing_obj, access_type='read', default=False):
"checks lock access"
return self.locks.check(accessing_obj,
access_type=access_type, default=default)
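# A hedged usage sketch (not part of the original file): TempMsg only needs sender and
# receiver objects exposing a .key attribute, so the tiny stand-in class below (purely
# illustrative) is enough to show what __str__ produces. It assumes make_iter() wraps a
# single object in a list.
if __name__ == '__main__':
    class _Stub(object):
        def __init__(self, key):
            self.key = key
    demo_msg = TempMsg(senders=_Stub("Anna"), receivers=_Stub("Bob"), message="Hello there!")
    print(str(demo_msg))  # -> Anna->Bob: Hello there!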
#------------------------------------------------------------
#
# Channel
#
#------------------------------------------------------------
class ChannelDB(TypedObject):
"""
This is the basis of a comm channel, only implementing
the very basics of distributing messages.
The Channel class defines the following properties:
key - main name for channel
desc - optional description of channel
aliases - alternative names for the channel
keep_log - bool if the channel should remember messages
permissions - perm strings
"""
db_subscriptions = models.ManyToManyField("players.PlayerDB",
related_name="subscription_set", null=True, verbose_name='subscriptions', db_index=True)
# Database manager
objects = managers.ChannelManager()
_typeclass_paths = settings.CHANNEL_TYPECLASS_PATHS
_default_typeclass_path = settings.BASE_CHANNEL_TYPECLASS or "src.comms.comms.Channel"
class Meta:
"Define Django meta options"
verbose_name = "Channel"
verbose_name_plural = "Channels"
#
# Channel class methods
#
def __str__(self):
return "Channel '%s' (%s)" % (self.key, self.typeclass.db.desc)
def has_connection(self, player):
"""
Checks whether this player is actually listening
to this channel.
"""
if hasattr(player, "player"):
player = player.player
player = player.dbobj
return player in self.db_subscriptions.all()
def connect(self, player):
"Connect the user to this channel. This checks access."
if hasattr(player, "player"):
player = player.player
player = player.typeclass
# check access
if not self.access(player, 'listen'):
return False
# pre-join hook
connect = self.typeclass.pre_join_channel(player)
if not connect:
return False
# subscribe
self.db_subscriptions.add(player.dbobj)
# post-join hook
self.typeclass.post_join_channel(player)
return True
def disconnect(self, player):
"Disconnect user from this channel."
if hasattr(player, "player"):
player = player.player
player = player.typeclass
# pre-disconnect hook
disconnect = self.typeclass.pre_leave_channel(player)
if not disconnect:
return False
# disconnect
self.db_subscriptions.remove(player.dbobj)
# post-disconnect hook
self.typeclass.post_leave_channel(player.dbobj)
return True
def access(self, accessing_obj, access_type='listen', default=False):
"""
Determines if another object has permission to access.
accessing_obj - object trying to access this one
access_type - type of access sought
default - what to return if no lock of access_type was found
"""
return self.locks.check(accessing_obj, access_type=access_type, default=default)
def delete(self):
"""
Deletes channel while also cleaning up channelhandler
"""
_GA(self, "attributes").clear()
_GA(self, "aliases").clear()
super(ChannelDB, self).delete()
from src.comms.channelhandler import CHANNELHANDLER
CHANNELHANDLER.update()
|
FujiZ/ns-3
|
refs/heads/master
|
src/antenna/bindings/modulegen__gcc_LP64.py
|
38
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.antenna', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## angles.h (module 'antenna'): ns3::Angles [struct]
module.add_class('Angles')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## antenna-model.h (module 'antenna'): ns3::AntennaModel [class]
module.add_class('AntennaModel', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class]
module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class]
module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel'])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class]
module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
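
# Note (hand-written): the file follows the usual pybindgen layout -- the
# register_types_* functions above declare the wrapper classes, typedefs and
# type aliases, while register_methods() below (through one helper per class)
# attaches constructors, member functions and instance attributes to those
# wrappers.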
def register_methods(root_module):
register_Ns3Angles_methods(root_module, root_module['ns3::Angles'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel'])
register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Angles_methods(root_module, cls):
cls.add_output_stream_operator()
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Angles const &', 'arg0')])
## angles.h (module 'antenna'): ns3::Angles::Angles() [constructor]
cls.add_constructor([])
## angles.h (module 'antenna'): ns3::Angles::Angles(double phi, double theta) [constructor]
cls.add_constructor([param('double', 'phi'), param('double', 'theta')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor]
cls.add_constructor([param('ns3::Vector', 'v')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor]
cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')])
## angles.h (module 'antenna'): ns3::Angles::phi [variable]
cls.add_instance_attribute('phi', 'double', is_const=False)
## angles.h (module 'antenna'): ns3::Angles::theta [variable]
cls.add_instance_attribute('theta', 'double', is_const=False)
return
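
# Illustrative use of the Angles binding registered above (hand-written sketch,
# not generated code; the 'ns.antenna'/'ns.core' import layout is an assumption):
#
#   import ns.antenna, ns.core
#   a = ns.antenna.Angles(0.0, 1.5707)              # phi, theta in radians
#   b = ns.antenna.Angles(ns.core.Vector(1, 0, 0))  # angles of a direction vector
#   print(a.phi, a.theta)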
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
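
# Illustrative use of the Hasher binding registered above (hand-written sketch;
# assumes the compiled 'ns.core' module is importable):
#
#   import ns.core
#   h = ns.core.Hasher()                # default hash implementation
#   value = h.GetHash32("hello world")  # 32-bit hash of a python string
#   h.clear()                           # reset state before hashing new data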
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
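
# Illustrative attribute access through the ObjectBase binding registered above
# (hand-written sketch; the concrete class, the "Beamwidth" attribute name and
# the DoubleValue wrapper from ns.core are assumptions, not declared in this file):
#
#   import ns.core, ns.antenna
#   m = ns.antenna.CosineAntennaModel()
#   m.SetAttribute("Beamwidth", ns.core.DoubleValue(60.0))
#   v = ns.core.DoubleValue()
#   m.GetAttribute("Beamwidth", v)
#   print(v.Get())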
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
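
# Illustrative TypeId lookup through the binding registered above (hand-written
# sketch; assumes pybindgen exposes static members on the class, as it normally does):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::CosineAntennaModel")
#   print(tid.GetName(), tid.GetAttributeN())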
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AntennaModel_methods(root_module, cls):
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel(ns3::AntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel() [constructor]
cls.add_constructor([])
## antenna-model.h (module 'antenna'): double ns3::AntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_pure_virtual=True, is_virtual=True)
## antenna-model.h (module 'antenna'): static ns3::TypeId ns3::AntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3CosineAntennaModel_methods(root_module, cls):
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
cls.add_constructor([])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
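
# Illustrative use of the CosineAntennaModel binding registered above
# (hand-written sketch; the 'ns.antenna' import path is an assumption):
#
#   import ns.antenna
#   m = ns.antenna.CosineAntennaModel()
#   m.SetBeamwidth(60.0)    # beamwidth in degrees
#   m.SetOrientation(0.0)   # boresight orientation in degrees
#   gain = m.GetGainDb(ns.antenna.Angles(0.0, 1.5707))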
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
cls.add_constructor([])
## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
cls.add_constructor([])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
## angles.h (module 'antenna'): extern double ns3::DegreesToRadians(double degrees) [free function]
module.add_function('DegreesToRadians',
'double',
[param('double', 'degrees')])
## angles.h (module 'antenna'): extern double ns3::RadiansToDegrees(double radians) [free function]
module.add_function('RadiansToDegrees',
'double',
[param('double', 'radians')])
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
jackxiang/jaikuengine
|
refs/heads/master
|
common/test/domain.py
|
34
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urlparse
from django.conf import settings
from common import clean
from common.test import base
from common.test import util
class DomainTest(base.ViewTestCase):
domain = 'www.jaikuengine.com'
gae_domain = 'jaikuengine.appspot.com'
hosted_domain = 'jaikuengine.com'
def get_with_host(self, url, host, ssl=False):
params = {'path': url,
'SERVER_NAME': host,
}
if ssl:
params['wsgi.url_scheme'] = 'https'
params['SERVER_PORT'] = '443'
return self.client.get(**params)
def post_with_host(self, url, data, host, ssl=False):
params = {'path': url,
'SERVER_NAME': host,
'data': data,
}
if ssl:
params['wsgi.url_scheme'] = 'https'
params['SERVER_PORT'] = '443'
return self.client.post(**params)
# some data driven testing
def check_domain_redirects(self, requests, **overrides):
o = util.override(**overrides)
for url, redirect in requests:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
if redirect:
e_scheme, e_netloc, e_path, e_params, e_query, e_fragment = \
urlparse.urlparse(redirect)
if scheme == 'https':
ssl = True
else:
ssl = False
r = self.get_with_host(path, host=netloc, ssl=ssl)
if redirect:
self.assertRedirectsPrefix(r, redirect, status_code=301)
else:
self.assertEqual(r.status_code, 200)
o.reset()
def test_hosted_domain_redirect(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
self.gae_domain,
]
good_host = self.domain
base_url = 'http://%s/tour'
ssl_url = 'https://%s/tour'
bad_requests = [(base_url % host, base_url % good_host)
for host in bad_hosts]
ssl_requests = [(ssl_url % host, base_url % good_host)
for host in bad_hosts]
good_requests = [(base_url % good_host, None)]
requests = bad_requests + ssl_requests + good_requests
# check with SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=True,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=True,
)
# check without SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=False,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=False,
)
def test_hosted_domain_redirect_login_ssl(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
]
good_host = self.gae_domain
base_url = 'http://%s/login'
ssl_url = 'https://%s/login'
bad_domain_and_ssl = [(base_url % host, ssl_url % good_host)
for host in bad_hosts]
bad_domain = [(ssl_url % host, ssl_url % good_host)
for host in bad_hosts]
bad_ssl = [(base_url % good_host, ssl_url % good_host)]
good = [(ssl_url % good_host, None)]
requests = (bad_domain_and_ssl
+ bad_domain
+ bad_ssl
+ good)
# check with SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.domain,
GAE_DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=True,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.domain,
GAE_DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=True,
)
def test_hosted_domain_redirect_login(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
self.gae_domain
]
good_host = self.domain
base_url = 'http://%s/login'
ssl_url = 'https://%s/login'
bad_domain_and_ssl = [(ssl_url % host, base_url % good_host)
for host in bad_hosts]
bad_domain = [(base_url % host, base_url % good_host)
for host in bad_hosts]
bad_ssl = [(ssl_url % good_host, base_url % good_host)]
good = [(base_url % good_host, None)]
requests = (bad_domain_and_ssl
+ bad_domain
+ bad_ssl
+ good)
# check without SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=False,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=False,
)
def test_redirect(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
self.domain,
]
good_host = self.gae_domain
base_url = 'http://%s/tour'
ssl_url = 'https://%s/tour'
bad_domain = [(base_url % host, base_url % good_host)
for host in bad_hosts]
bad_domain_and_ssl = [(ssl_url % host, base_url % good_host)
for host in bad_hosts]
bad_ssl = [(ssl_url % good_host, base_url % good_host)]
good = [(base_url % good_host, None)]
requests = bad_domain + bad_domain_and_ssl + bad_ssl + good
# check with SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=True,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=True,
)
# check without SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=False,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=False,
)
def test_redirect_login(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
]
good_host = self.gae_domain
base_url = 'http://%s/login'
ssl_url = 'https://%s/login'
bad_domain_and_ssl = [(ssl_url % host, base_url % good_host)
for host in bad_hosts]
bad_domain = [(base_url % host, base_url % good_host)
for host in bad_hosts]
bad_ssl = [(ssl_url % good_host, base_url % good_host)]
good = [(base_url % good_host, None)]
requests = (bad_domain_and_ssl
+ bad_domain
+ bad_ssl
+ good)
# check without SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=False,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=False,
)
def test_redirect_login_ssl(self):
bad_hosts = ['www.somewhere.com',
'somewhere.com',
'jaikuengine.com',
]
good_host = self.gae_domain
base_url = 'http://%s/login'
ssl_url = 'https://%s/login'
bad_domain_and_ssl = [(base_url % host, ssl_url % good_host)
for host in bad_hosts]
bad_domain = [(ssl_url % host, ssl_url % good_host)
for host in bad_hosts]
bad_ssl = [(base_url % good_host, ssl_url % good_host)]
good = [(ssl_url % good_host, None)]
requests = (bad_domain_and_ssl
+ bad_domain
+ bad_ssl
+ good)
# check with SSL_LOGIN_ENABLED
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
GAE_DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
SUBDOMAINS_ENABLED=False,
SSL_LOGIN_ENABLED=True,
)
# check with subdomains enabled
self.check_domain_redirects(requests,
DOMAIN=self.gae_domain,
GAE_DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=False,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=True,
)
def test_login_sso(self):
o = util.override(DOMAIN=self.domain,
GAE_DOMAIN=self.gae_domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
SSL_LOGIN_ENABLED=True,
)
r = self.post_with_host('/login',
{'log': 'popular',
'pwd': self.passwords[clean.nick('popular')]
},
self.gae_domain,
ssl=True
)
check_redirect = 'http://%s/login/noreally' % self.domain
r = self.assertRedirectsPrefix(r,
check_redirect,
status_code=302,
target_status_code=302)
r = self.assertRedirectsPrefix(r,
'/user/popular/overview',
status_code=302,
target_status_code=200)
o.reset()
def test_api_subdomain(self):
self.override = util.override(DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
)
r = self.get_with_host('/docs', host='api.%s' % self.hosted_domain)
self.assertContains(r, 'Documentation')
r = self.get_with_host('/', host='api.%s' % self.hosted_domain)
r = self.assertRedirectsPrefix(r,
'http://api.%s/docs' % self.hosted_domain,
status_code=301
)
self.assertContains(r, 'Documentation')
def test_blank_wildcard_subdomain(self):
self.override = util.override(DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
WILDCARD_USER_SUBDOMAINS_ENABLED=True
)
r = self.get_with_host('', host='%s' % self.hosted_domain)
r = self.assertRedirectsPrefix(r,
'http://www.%s' % self.hosted_domain,
status_code=301)
self.assertContains(r, 'test entry')
def test_wildcard_subdomain(self):
self.override = util.override(DOMAIN=self.domain,
HOSTED_DOMAIN_ENABLED=True,
HOSTED_DOMAIN=self.hosted_domain,
SUBDOMAINS_ENABLED=True,
WILDCARD_USER_SUBDOMAINS_ENABLED=True
)
r = self.get_with_host('', host='popular.%s' % self.hosted_domain)
self.assertContains(r, 'test entry')
# TODO(termie): remove this, once the temporary fix is removed that breaks
# all these tests
del DomainTest
|
was4444/chromium.src
|
refs/heads/nw15
|
third_party/mesa/generate_git_sha1.py
|
167
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import os.path
import sys
output = sys.argv[1]
parentdir = os.path.abspath(os.path.join(output, os.pardir))
#The original body of this file is generated by this bash script:
#
#touch "${DIR}/git_sha1.h.tmp"
#if test -d .git; then \
# if which git > /dev/null; then \
# git log -n 1 --oneline | \
# sed 's/^\([^ ]*\) .*/#define MESA_GIT_SHA1 "git-\1"/' \
# > "${DIR}/git_sha1.h.tmp" ; \
# fi \
# fi
#if ! cmp -s "${DIR}/git_sha1.h.tmp" "${DIR}/git_sha1.h"; then \
# mv "${DIR}/git_sha1.h.tmp" "${DIR}/git_sha1.h" ;\
# else \
# rm "${DIR}/git_sha1.h.tmp" ;\
# fi
#
#However, Chromium shouldn't depend on Bash, and this preprocessor macro isn't
#necessary in the first place
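#
#For reference, a minimal Python sketch of what that bash snippet computed
#(illustrative only: the helper below is an assumption, is never called here,
#and does not change this script's behavior):
def _git_sha1_define(src_dir='.'):
    #Return a '#define MESA_GIT_SHA1 ...' line for the current checkout, or an
    #empty string when git or the repository is unavailable.
    import subprocess
    try:
        out = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
                                      cwd=src_dir)
    except (OSError, subprocess.CalledProcessError):
        return ''
    return '#define MESA_GIT_SHA1 "git-%s"\n' % out.strip().decode('ascii')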
if not os.path.isdir(parentdir):
os.makedirs(parentdir)
with open(output, "w") as f:
pass
|
bobrathbone/piradio
|
refs/heads/master
|
piface_remote.py
|
1
|
#!/usr/bin/env python
#
# Raspberry Pi PiFace remote control daemon
# $Id: piface_remote.py,v 1.6 2015/03/14 13:21:18 bob Exp $
#
# Author : Bob Rathbone
# Site : http://www.bobrathbone.com
#
# This program uses the piface CAD libraries
# See http://www.piface.org.uk/products/piface_control_and_display/
#
# License: GNU V3, See https://www.gnu.org/copyleft/gpl.html
#
# Disclaimer: Software is provided as is and absolutely no warranties are implied or given.
# The authors shall not be liable for any loss or damage however caused.
#
# The important configuration files are
# /etc/lirc/lircrc Program to event registration file
# /etc/lircd.conf User generated remote control configuration file
#
import RPi.GPIO as GPIO
import pifacecad.ir
import sys
import os
import time
import signal
from signal import SIGUSR1
# Radio project imports
from rc_daemon import Daemon
from log_class import Log
log = Log()
IR_LED=11 # GPIO 11 pin 23
muted = False
pidfile = '/var/run/radiod.pid'
# Signal SIGTERM handler
def signalHandler(signal,frame):
global log
pid = os.getpid()
log.message("Remote control stopped, PID " + str(pid), log.INFO)
sys.exit(0)
# Daemon class
class MyDaemon(Daemon):
def run(self):
log.init('radio')
signal.signal(signal.SIGHUP,signalHandler)
progcall = str(sys.argv)
log.message('Remote control running pid ' + str(os.getpid()), log.INFO)
exec_cmd('sudo service lirc start')
GPIO.setwarnings(False) # Disable warnings
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
GPIO.setup(IR_LED, GPIO.OUT) # Output LED
listener()
def status(self):
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "Remote control status: not running"
log.message(message, log.INFO)
print message
else:
message = "Remote control running pid " + str(pid)
log.message(message, log.INFO)
print message
return
# End of class overrides
# Handle events
def print_ir_code(event):
global muted
	message = "Remote: " + str(event.ir_code)
log.message(message, log.DEBUG)
GPIO.output(IR_LED, True)
key = event.ir_code
if key == 'KEY_VOLUMEUP':
exec_cmd('mpc play')
exec_cmd('mpc volume +5')
elif key == 'KEY_VOLUMEDOWN':
exec_cmd('mpc play')
exec_cmd('mpc volume -5')
elif key == 'KEY_CHANNELUP':
exec_cmd('mpc next')
elif key == 'KEY_CHANNELDOWN':
exec_cmd('mpc prev')
elif key == 'KEY_MUTE':
if not muted:
exec_cmd('mpc pause')
muted = True
else:
exec_cmd('mpc play')
exec_cmd('mpc volume +1')
muted = False
elif key == 'KEY_MENU':
if os.path.exists(pidfile):
pf = file(pidfile,'r')
pid = int(pf.read().strip())
pf.close()
os.kill(pid, SIGUSR1)
GPIO.output(IR_LED, False)
return
# Execute system command
def exec_cmd(cmd):
log.message(cmd, log.DEBUG)
p = os.popen(cmd)
result = p.readline().rstrip('\n')
return result
# The main Remote control listen routine
def listener():
log.message("Remote: setup listener", log.DEBUG)
listener = pifacecad.ir.IREventListener(prog="piradio")
listener.register('KEY_VOLUMEUP',print_ir_code)
listener.register('KEY_VOLUMEDOWN',print_ir_code)
listener.register('KEY_CHANNELUP',print_ir_code)
listener.register('KEY_CHANNELDOWN',print_ir_code)
listener.register('KEY_MENU',print_ir_code)
listener.register('KEY_MUTE',print_ir_code)
print "Activating"
listener.activate()
### Main routine ###
if __name__ == "__main__":
daemon = MyDaemon('/var/run/remote.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'status' == sys.argv[1]:
daemon.status()
elif 'version' == sys.argv[1]:
print "Version 0.1"
else:
print "Unknown command: " + sys.argv[1]
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|status|version" % sys.argv[0]
sys.exit(2)
# End of script
|
ChinaQuants/PTVS
|
refs/heads/master
|
Python/Tests/TestData/VirtualEnv/env/Lib/site.py
|
25
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, fob, oar and spam, and two path
configuration files, fob.pth and oar.pth. Assume fob.pth contains the
following:
# fob package configuration
fob
oar
bletch
and oar.pth contains:
# oar package configuration
oar
Then the following directories are added to sys.path, in this order:
  /usr/local/lib/python2.X/site-packages/fob
  /usr/local/lib/python2.X/site-packages/oar
Note that bletch is omitted because it doesn't exist; fob precedes oar
because fob.pth comes alphabetically before oar.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
USER_SITE = None
USER_BASE = None
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
ModuleType = type(os)
def makepath(*paths):
dir = os.path.join(*paths)
if _is_jython and (dir == '__classpath__' or
dir.startswith('__pyclasspath__')):
return dir, dir
dir = os.path.abspath(dir)
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if ((_is_jython and not isinstance(m, ModuleType)) or
hasattr(m, '__loader__')):
# only modules need the abspath in Jython. and don't mess
# with a PEP 302-supplied __file__
continue
f = getattr(m, '__file__', None)
if f is None:
continue
m.__file__ = os.path.abspath(f)
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from distutils.util import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path[-1]), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Add a new path to known_paths by combining sitedir and 'name' or execute
sitedir if it starts with 'import'"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
try:
for line in f:
if line.startswith("#"):
continue
if line.startswith("import"):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
finally:
f.close()
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
names.sort()
for name in names:
if name.endswith(os.extsep + "pth"):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new built-ins 'quit' and 'exit'.
    These are objects that display a hint on how to exit and raise SystemExit when called.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = open(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print(self.__lines[i])
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
try:
key = raw_input(prompt)
except NameError:
key = input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
builtins.copyright = _Printer("copyright", sys.copyright)
if _is_jython:
builtins.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
elif _is_pypy:
builtins.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://codespeak.net/pypy")
else:
builtins.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
builtins.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
def virtual_install_main_packages():
f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
sys.real_prefix = f.read().strip()
f.close()
pos = 2
hardcoded_relative_dirs = []
if sys.path[0] == '':
pos += 1
if _is_jython:
paths = [os.path.join(sys.real_prefix, 'Lib')]
elif _is_pypy:
if sys.pypy_version_info >= (1, 5):
cpyver = '%d.%d' % sys.version_info[:2]
else:
cpyver = '%d.%d.%d' % sys.version_info[:3]
paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
os.path.join(sys.real_prefix, 'lib-python', 'modified-%s' % cpyver),
os.path.join(sys.real_prefix, 'lib-python', cpyver)]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
#
# This is hardcoded in the Python executable, but relative to sys.prefix:
for path in paths[:]:
plat_path = os.path.join(path, 'plat-%s' % sys.platform)
if os.path.exists(plat_path):
paths.append(plat_path)
elif sys.platform == 'win32':
paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
else:
paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
if os.path.exists(lib64_path):
if _is_64bit:
paths.insert(0, lib64_path)
else:
paths.append(lib64_path)
# This is hardcoded in the Python executable, but relative to
# sys.prefix. Debian change: we need to add the multiarch triplet
# here, which is where the real stuff lives. As per PEP 421, in
# Python 3.3+, this lives in sys.implementation, while in Python 2.7
# it lives in sys.
try:
arch = getattr(sys, 'implementation', sys)._multiarch
except AttributeError:
# This is a non-multiarch aware Python. Fallback to the old way.
arch = sys.platform
plat_path = os.path.join(sys.real_prefix, 'lib',
'python'+sys.version[:3],
'plat-%s' % arch)
if os.path.exists(plat_path):
paths.append(plat_path)
# This is hardcoded in the Python executable, but
# relative to sys.prefix, so we have to fix up:
for path in list(paths):
tk_dir = os.path.join(path, 'lib-tk')
if os.path.exists(tk_dir):
paths.append(tk_dir)
# These are hardcoded in the Apple's Python executable,
# but relative to sys.prefix, so we have to fix them up:
if sys.platform == 'darwin':
hardcoded_paths = [os.path.join(relative_dir, module)
for relative_dir in hardcoded_relative_dirs
for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
for path in hardcoded_paths:
if os.path.exists(path):
paths.append(path)
sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1
def virtual_addsitepackages(known_paths):
force_global_eggs_after_local_site_packages()
return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths)
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
def main():
global ENABLE_USER_SITE
virtual_install_main_packages()
abs__file__()
paths_in_sys = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if _is_jython:
fixclasspath()
GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
if not GLOBAL_SITE_PACKAGES:
ENABLE_USER_SITE = False
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
paths_in_sys = addsitepackages(paths_in_sys)
paths_in_sys = addusersitepackages(paths_in_sys)
if GLOBAL_SITE_PACKAGES:
paths_in_sys = virtual_addsitepackages(paths_in_sys)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
def exists(path):
if os.path.isdir(path):
return "exists"
else:
return "doesn't exist"
print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_BASE)))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
|
saurabh6790/omni-apps
|
refs/heads/master
|
setup/page/setup_wizard/test_setup_wizard.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from setup.page.setup_wizard.test_setup_data import args
from setup.page.setup_wizard.setup_wizard import setup_account
if __name__=="__main__":
webnotes.connect()
webnotes.local.form_dict = webnotes._dict(args)
setup_account()
|
837468220/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/tlslite/integration/IntegrationHelper.py
|
286
|
class IntegrationHelper:
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
|
Maximilian-Reuter/SickRage-1
|
refs/heads/master
|
lib/js2py/translators/jsregexps.py
|
33
|
from pyjsparserdata import *
REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'}
NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ???
CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'}
CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'}
CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'}
def SpecialChar(char):
return {'type': 'SpecialChar',
'content': char}
def isPatternCharacter(char):
return char not in NOT_PATTERN_CHARS
class JsRegExpParser:
def __init__(self, source, flags):
self.source = source
self.flags = flags
self.index = 0
self.length = len(source)
self.lineNumber = 0
self.lineStart = 0
def parsePattern(self):
        '''Perform string escape - for regexp literals'''
return {'type': 'Pattern',
'contents': self.parseDisjunction()}
def parseDisjunction(self):
alternatives = []
while True:
alternatives.append(self.parseAlternative())
if not self.isEOF():
self.expect_character('|')
else:
break
return {'type': 'Disjunction',
'contents': alternatives}
def isEOF(self):
if self.index>=self.length:
return True
return False
def expect_character(self, character):
if self.source[self.index]!=character:
self.throwUnexpected(character)
self.index += 1
def parseAlternative(self):
contents = []
while not self.isEOF() and self.source[self.index]!='|':
contents.append(self.parseTerm())
return {'type': 'Alternative',
'contents': contents}
def follows(self, chars):
for i, c in enumerate(chars):
if self.index+i>=self.length or self.source[self.index+i] != c:
return False
return True
def parseTerm(self):
assertion = self.parseAssertion()
if assertion:
return assertion
else:
return {'type': 'Term',
'contents': self.parseAtom()} # quantifier will go inside atom!
def parseAssertion(self):
if self.follows('$'):
content = SpecialChar('$')
self.index += 1
elif self.follows('^'):
content = SpecialChar('^')
self.index += 1
elif self.follows('\\b'):
content = SpecialChar('\\b')
self.index += 2
elif self.follows('\\B'):
content = SpecialChar('\\B')
self.index += 2
elif self.follows('(?='):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': False}
elif self.follows('(?!'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
            content = {'type': 'Lookahead',
'contents': dis,
'negated': True}
else:
return None
return {'type': 'Assertion',
'content': content}
def parseAtom(self):
if self.follows('.'):
content = SpecialChar('.')
self.index += 1
elif self.follows('\\'):
self.index += 1
content = self.parseAtomEscape()
elif self.follows('['):
content = self.parseCharacterClass()
elif self.follows('(?:'):
self.index += 3
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif self.follows('('):
self.index += 1
dis = self.parseDisjunction()
self.expect_character(')')
content = 'idk'
elif isPatternCharacter(self.source[self.index]):
content = self.source[self.index]
self.index += 1
else:
return None
quantifier = self.parseQuantifier()
return {'type': 'Atom',
'content': content,
'quantifier': quantifier}
def parseQuantifier(self):
prefix = self.parseQuantifierPrefix()
if not prefix:
return None
greedy = True
if self.follows('?'):
self.index += 1
greedy = False
return {'type': 'Quantifier',
'contents': prefix,
'greedy': greedy}
def parseQuantifierPrefix(self):
if self.isEOF():
return None
if self.follows('+'):
content = '+'
self.index += 1
elif self.follows('?'):
content = '?'
self.index += 1
elif self.follows('*'):
content = '*'
self.index += 1
elif self.follows('{'): # try matching otherwise return None and restore the state
i = self.index
self.index += 1
digs1 = self.scanDecimalDigs()
# if no minimal number of digs provided then return no quantifier
if not digs1:
self.index = i
return None
# scan char limit if provided
if self.follows(','):
self.index += 1
digs2 = self.scanDecimalDigs()
else:
digs2 = ''
# must be valid!
if not self.follows('}'):
self.index = i
return None
else:
self.expect_character('}')
content = int(digs1), int(digs2) if digs2 else None
else:
return None
return content
    def parseAtomEscape(self):
        ch = self.source[self.index]
        if isDecimalDigit(ch) and ch != '0':
            # decimal escape (backreference), e.g. \1
            digs = self.scanDecimalDigs()
            return SpecialChar('\\' + digs)
        elif ch in CHAR_CLASS_ESCAPE:
            self.index += 1
            return SpecialChar('\\' + ch)
        else:
            return self.parseCharacterEscape()
    def parseCharacterEscape(self):
        ch = self.source[self.index]
        if ch in CONTROL_ESCAPE_CHARS:
            self.index += 1
            return SpecialChar('\\' + ch)
        if ch == 'c':
            # TODO: control-letter escapes (\c followed by a letter) are not handled yet
            pass
def scanDecimalDigs(self):
s = self.index
while not self.isEOF() and isDecimalDigit(self.source[self.index]):
self.index += 1
return self.source[s:self.index]
a = JsRegExpParser('a(?=x)', '')
print(a.parsePattern())
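# Illustration only: a second quick check exercising the bounded-quantifier
# branch of parseQuantifierPrefix ('{m,n}' followed by a non-greedy '?'),
# complementing the lookahead example above. The pattern is an arbitrary
# choice for this demo.
b = JsRegExpParser('ab{2,3}?', '')
print(b.parsePattern())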
|
LimeTheCoder/teammates
|
refs/heads/master
|
BackupFiles/download_data.py
|
24
|
#This script should be placed in the GAE Python SDK directory.
#The path of the SDK will look like C:\Program Files (x86)\Google\google_appengine
#The script is to be used in conjunction with the generated_bulkloader.yaml file
#The script will download all types of entities from the GAE datastore except the StudentProfile entity type.
#The backup files will be stored on the Desktop with a timestamp of when the backup is performed.
import os
import datetime
#Creates a folder on desktop with a timestamp of the backup
desktopFile = os.path.expanduser("~/Desktop/TM_Backup/")
mydir = os.path.join(desktopFile, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
os.makedirs(mydir)
#Runs a set of commands to download all the different types of entities from the datastore
entity_kinds = [
    ("Account", "accounts.csv"),
    ("Comment", "comment.csv"),
    ("Course", "course.csv"),
    ("Evaluation", "evaluation.csv"),
    ("FeedbackQuestion", "feedbackQuestion.csv"),
    ("FeedbackResponse", "feedbackResponse.csv"),
    ("FeedbackResponseComment", "feedbackResponseComment.csv"),
    ("FeedbackSession", "feedbackSession.csv"),
    ("Instructor", "instructor.csv"),
    ("Student", "student.csv"),
    ("Submission", "submission.csv"),
]
for kind, filename in entity_kinds:
    os.system("bulkloader.py --download --url https://teammatesv4.appspot.com/remote_api "
              "--config_file generated_bulkloader.yaml --kind %s --filename %s/%s"
              % (kind, mydir, filename))
|
papouso/odoo
|
refs/heads/8.0
|
addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py
|
337
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
_name = 'hr.recruitment.partner.create'
_description = 'Create Partner from job application'
_columns = {
'close': fields.boolean('Close job request'),
}
def view_init(self, cr, uid, fields_list, context=None):
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
if case.partner_id:
raise osv.except_osv(_('Error!'),
_('A contact is already defined on this job request.'))
pass
def make_order(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
partner_obj = self.pool.get('res.partner')
case_obj = self.pool.get('hr.applicant')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
if partner_id:
                raise osv.except_osv(_('Error!'),_('A contact already exists with the same name.'))
partner_id = partner_obj.create(cr, uid, {
'name': case.partner_name or case.name,
'user_id': case.user_id.id,
'comment': case.description,
'phone': case.partner_phone,
'mobile': case.partner_mobile,
'email': case.email_from
}, context=context)
case_obj.write(cr, uid, [case.id], {
'partner_id': partner_id,
}, context=context)
return {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': res['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Rudloff/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/uplynk.py
|
1
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
ExtractorError,
)
class UplynkIE(InfoExtractor):
IE_NAME = 'uplynk'
_VALID_URL = r'https?://.*?\.uplynk\.com/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.(?:m3u8|json)(?:.*?\bpbs=(?P<session_id>[^&]+))?'
_TEST = {
'url': 'http://content.uplynk.com/e89eaf2ce9054aa89d92ddb2d817a52e.m3u8',
'info_dict': {
'id': 'e89eaf2ce9054aa89d92ddb2d817a52e',
'ext': 'mp4',
'title': '030816-kgo-530pm-solar-eclipse-vid_web.mp4',
'uploader_id': '4413701bf5a1488db55b767f8ae9d4fa',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _extract_uplynk_info(self, uplynk_content_url):
path, external_id, video_id, session_id = re.match(UplynkIE._VALID_URL, uplynk_content_url).groups()
display_id = video_id or external_id
formats = self._extract_m3u8_formats('http://content.uplynk.com/%s.m3u8' % path, display_id, 'mp4')
if session_id:
for f in formats:
f['extra_param_to_segment_url'] = {
'pbs': session_id,
}
self._sort_formats(formats)
asset = self._download_json('http://content.uplynk.com/player/assetinfo/%s.json' % path, display_id)
if asset.get('error') == 1:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, asset['msg']), expected=True)
return {
'id': asset['asset'],
'title': asset['desc'],
'thumbnail': asset.get('default_poster_url'),
'duration': float_or_none(asset.get('duration')),
'uploader_id': asset.get('owner'),
'formats': formats,
}
def _real_extract(self, url):
return self._extract_uplynk_info(url)
class UplynkPreplayIE(UplynkIE):
IE_NAME = 'uplynk:preplay'
_VALID_URL = r'https?://.*?\.uplynk\.com/preplay2?/(?P<path>ext/[0-9a-f]{32}/(?P<external_id>[^/?&]+)|(?P<id>[0-9a-f]{32}))\.json'
_TEST = None
def _real_extract(self, url):
path, external_id, video_id = re.match(self._VALID_URL, url).groups()
display_id = video_id or external_id
preplay = self._download_json(url, display_id)
content_url = 'http://content.uplynk.com/%s.m3u8' % path
session_id = preplay.get('sid')
if session_id:
content_url += '?pbs=' + session_id
return self._extract_uplynk_info(content_url)
|
hasgeek/funnel
|
refs/heads/master
|
funnel/forms/notification.py
|
1
|
from __future__ import annotations
from collections import namedtuple
from flask import url_for
from baseframe import __
import baseframe.forms as forms
from ..models import User, notification_type_registry
from ..transports import platform_transports
__all__ = [
'transport_labels',
'UnsubscribeForm',
'SetNotificationPreferenceForm',
]
TransportLabels = namedtuple(
'TransportLabels',
[
'title',
'requirement',
'requirement_action',
'unsubscribe_form',
'unsubscribe_description',
'switch',
'enabled_main',
'enabled',
'disabled_main',
'disabled',
],
)
transport_labels = {
'email': TransportLabels(
title=__("Email"),
requirement=__("To enable, add a verified email address"),
requirement_action=lambda: url_for('add_email'),
unsubscribe_form=__("Notify me by email"),
unsubscribe_description=__("Uncheck this to disable all email notifications"),
switch=__("Email notifications"),
enabled_main=__("Enabled selected email notifications"),
enabled=__("Enabled this email notification"),
disabled_main=__("Disabled all email notifications"),
disabled=__("Disabled this email notification"),
),
'sms': TransportLabels(
title=__("SMS"),
requirement=__("To enable, add a verified phone number"),
requirement_action=lambda: url_for('add_phone'),
unsubscribe_form=__("Notify me by SMS"),
unsubscribe_description=__("Uncheck this to disable all SMS notifications"),
switch=__("SMS notifications"),
enabled_main=__("Enabled selected SMS notifications"),
enabled=__("Enabled this SMS notification"),
disabled_main=__("Disabled all SMS notifications"),
disabled=__("Disabled this SMS notification"),
),
'webpush': TransportLabels(
title=__("Browser"),
requirement=__("To enable, allow push notifications in the browser"),
requirement_action=lambda: None,
unsubscribe_form=__("Notify me with browser notifications"),
unsubscribe_description=__("Uncheck this to disable all browser notifications"),
switch=__("Push notifications"),
enabled_main=__("Enabled selected push notifications"),
enabled=__("Enabled this push notification"),
disabled_main=__("Disabled all push notifications"),
disabled=__("Disabled this push notification"),
),
'telegram': TransportLabels(
title=__("Telegram"),
requirement=__("To enable, link your Telegram account"),
requirement_action=lambda: None,
unsubscribe_form=__("Notify me on Telegram"),
unsubscribe_description=__(
"Uncheck this to disable all Telegram notifications"
),
switch=__("Telegram notifications"),
enabled_main=__("Enabled selected Telegram notifications"),
enabled=__("Enabled this Telegram notification"),
disabled_main=__("Disabled all Telegram notifications"),
disabled=__("Disabled this Telegram notification"),
),
'whatsapp': TransportLabels(
title=__("WhatsApp"),
requirement=__("To enable, add your WhatsApp number"),
requirement_action=lambda: url_for('add_phone'),
unsubscribe_form=__("Notify me on WhatsApp"),
unsubscribe_description=__(
"Uncheck this to disable all WhatsApp notifications"
),
switch=__("WhatsApp notifications"),
enabled_main=__("Enabled selected WhatsApp notifications"),
enabled=__("Enabled this WhatsApp notification"),
disabled_main=__("Disabled all WhatsApp notifications"),
disabled=__("Disabled this WhatsApp notification"),
),
}
@User.forms('unsubscribe')
class UnsubscribeForm(forms.Form):
__expects__ = ('transport', 'notification_type')
# To consider: Replace the field's ListWidget with a GroupedListWidget, and show all
# known notifications by category, not just the ones the user has received a
# notification for. This will avoid a dark pattern wherein a user keeps getting
# subscribed to new types of notifications, a problem Twitter had when they
# attempted to engage dormant accounts by inventing new reasons to email them.
# However, also consider that this will be a long and overwhelming list, and will
# not help with new notification types added after the user visits this list. The
# better option may be to set notification preferences based on previous
# preferences. A crude form of this exists in the NotificationPreferences class,
# but it should be smarter about defaults per category of notification.
main = forms.BooleanField(
__("Notify me"), description=__("Uncheck this to disable all notifications")
)
types = forms.SelectMultipleField(
__("Or disable only a specific notification"),
widget=forms.ListWidget(),
option_widget=forms.CheckboxInput(),
)
# This token is validated in the view, not here, because it has to be valid in the
# GET request itself, and the UI flow is very dependent on the validation error.
token = forms.HiddenField(
__("Unsubscribe token"), validators=[forms.validators.DataRequired()]
)
token_type = forms.HiddenField(
__("Unsubscribe token type"), validators=[forms.validators.DataRequired()]
)
def set_queries(self):
# Populate choices with all notification types that the user has a preference
# row for.
if self.transport in transport_labels:
self.main.label.text = transport_labels[self.transport].unsubscribe_form
self.main.description = transport_labels[
self.transport
].unsubscribe_description
self.types.choices = [
(
ntype,
notification_type_registry[ntype].title
+ (" 👈" if ntype == self.notification_type else ''),
)
for ntype in notification_type_registry
if ntype in self.edit_obj.notification_preferences
and notification_type_registry[ntype].allow_transport(self.transport)
] # Sorted by definition order. Usable until we introduce grouping
def get_main(self, obj):
return obj.main_notification_preferences.by_transport(self.transport)
def get_types(self, obj):
# Populate data with all notification types for which the user has the
# current transport enabled
return [
ntype
for ntype, user_prefs in obj.notification_preferences.items()
if user_prefs.by_transport(self.transport)
]
def set_main(self, obj):
obj.main_notification_preferences.set_transport(self.transport, self.main.data)
def set_types(self, obj):
# self.types.data will only contain the enabled preferences. Therefore, iterate
# through all choices and toggle true or false based on whether it's in the
# enabled list. This uses dict access instead of .get because the rows are known
# to exist (set_queries loaded from this source).
for ntype, _title in self.types.choices:
obj.notification_preferences[ntype].set_transport(
self.transport, ntype in self.types.data
)
@User.forms('set_notification_preference')
class SetNotificationPreferenceForm(forms.Form):
"""Set one notification preference."""
notification_type = forms.SelectField(__("Notification type"))
transport = forms.SelectField(
__("Transport"), validators=[forms.validators.DataRequired()]
)
enabled = forms.BooleanField(__("Enable this transport"))
def set_queries(self):
# The main switch is special-cased with an empty string for notification type
self.notification_type.choices = [('', __("Main switch"))] + [
(ntype, cls.title) for ntype, cls in notification_type_registry.items()
]
self.transport.choices = [
(transport, transport)
for transport in platform_transports
if platform_transports[transport]
]
def status_message(self):
"""Render a success or error message."""
if self.errors:
# Flatten errors into a single string because typically this will only
# be a CSRF error.
return ' '.join(' '.join(message) for message in self.errors.values())
if self.notification_type.data == '':
return (
transport_labels[self.transport.data].enabled_main
if self.enabled.data
else transport_labels[self.transport.data].disabled_main
)
return (
transport_labels[self.transport.data].enabled
if self.enabled.data
else transport_labels[self.transport.data].disabled
)
|
lordmuffin/aws-cfn-plex
|
refs/heads/master
|
functions/credstash/botocore/docs/bcdoc/docstringparser.py
|
19
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.compat import six
class DocStringParser(six.moves.html_parser.HTMLParser):
"""
A simple HTML parser. Focused on converting the subset of HTML
that appears in the documentation strings of the JSON models into
simple ReST format.
"""
def __init__(self, doc):
self.tree = None
self.doc = doc
six.moves.html_parser.HTMLParser.__init__(self)
def reset(self):
six.moves.html_parser.HTMLParser.reset(self)
self.tree = HTMLTree(self.doc)
def feed(self, data):
# HTMLParser is an old style class, so the super() method will not work.
six.moves.html_parser.HTMLParser.feed(self, data)
self.tree.write()
self.tree = HTMLTree(self.doc)
def close(self):
six.moves.html_parser.HTMLParser.close(self)
# Write if there is anything remaining.
self.tree.write()
self.tree = HTMLTree(self.doc)
def handle_starttag(self, tag, attrs):
self.tree.add_tag(tag, attrs=attrs)
def handle_endtag(self, tag):
self.tree.add_tag(tag, is_start=False)
def handle_data(self, data):
self.tree.add_data(data)
class HTMLTree(object):
"""
A tree which handles HTML nodes. Designed to work with a python HTML parser,
meaning that the current_node will be the most recently opened tag. When
a tag is closed, the current_node moves up to the parent node.
"""
def __init__(self, doc):
self.doc = doc
self.head = StemNode()
self.current_node = self.head
self.unhandled_tags = []
def add_tag(self, tag, attrs=None, is_start=True):
if not self._doc_has_handler(tag, is_start):
self.unhandled_tags.append(tag)
return
if is_start:
if tag == 'li':
node = LineItemNode(attrs)
else:
node = TagNode(tag, attrs)
self.current_node.add_child(node)
self.current_node = node
else:
self.current_node = self.current_node.parent
def _doc_has_handler(self, tag, is_start):
if is_start:
handler_name = 'start_%s' % tag
else:
handler_name = 'end_%s' % tag
return hasattr(self.doc.style, handler_name)
def add_data(self, data):
self.current_node.add_child(DataNode(data))
def write(self):
self.head.write(self.doc)
class Node(object):
def __init__(self, parent=None):
self.parent = parent
def write(self, doc):
raise NotImplementedError
class StemNode(Node):
def __init__(self, parent=None):
super(StemNode, self).__init__(parent)
self.children = []
def add_child(self, child):
child.parent = self
self.children.append(child)
def write(self, doc):
self._write_children(doc)
def _write_children(self, doc):
for child in self.children:
child.write(doc)
class TagNode(StemNode):
"""
A generic Tag node. It will verify that handlers exist before writing.
"""
def __init__(self, tag, attrs=None, parent=None):
super(TagNode, self).__init__(parent)
self.attrs = attrs
self.tag = tag
def write(self, doc):
self._write_start(doc)
self._write_children(doc)
self._write_end(doc)
def _write_start(self, doc):
handler_name = 'start_%s' % self.tag
if hasattr(doc.style, handler_name):
getattr(doc.style, handler_name)(self.attrs)
def _write_end(self, doc):
handler_name = 'end_%s' % self.tag
if hasattr(doc.style, handler_name):
getattr(doc.style, handler_name)()
class LineItemNode(TagNode):
def __init__(self, attrs=None, parent=None):
super(LineItemNode, self).__init__('li', attrs, parent)
def write(self, doc):
self._lstrip(self)
super(LineItemNode, self).write(doc)
def _lstrip(self, node):
"""
Traverses the tree, stripping out whitespace until text data is found
:param node: The node to strip
:return: True if non-whitespace data was found, False otherwise
"""
for child in node.children:
if isinstance(child, DataNode):
child.lstrip()
if child.data:
return True
else:
found = self._lstrip(child)
if found:
return True
return False
class DataNode(Node):
"""
A Node that contains only string data.
"""
def __init__(self, data, parent=None):
super(DataNode, self).__init__(parent)
if not isinstance(data, six.string_types):
raise ValueError("Expecting string type, %s given." % type(data))
self.data = data
def lstrip(self):
self.data = self.data.lstrip()
def write(self, doc):
if not self.data:
return
if self.data.isspace():
str_data = ' '
else:
end_space = self.data[-1].isspace()
words = self.data.split()
words = doc.translate_words(words)
str_data = ' '.join(words)
if end_space:
str_data += ' '
doc.handle_data(str_data)
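# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of botocore's public API):
# feeds a small HTML fragment through DocStringParser using a stub document
# object. The stub's style handlers, translate_words and handle_data methods
# are assumptions made just for this demo; real callers pass botocore's own
# ReST document objects.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _StubStyle(object):
        # HTMLTree._doc_has_handler looks up 'start_<tag>'/'end_<tag>' here,
        # so only <p> is handled below and every other tag is ignored.
        def start_p(self, attrs=None):
            print('[start paragraph]')
        def end_p(self):
            print('[end paragraph]')
    class _StubDoc(object):
        style = _StubStyle()
        def translate_words(self, words):
            return words
        def handle_data(self, data):
            print('data: %r' % data)
    demo_parser = DocStringParser(_StubDoc())
    demo_parser.feed('<p>Hello <i>world</i></p>')
    demo_parser.close()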
|
axt/angr
|
refs/heads/master
|
angr/pathprioritizer.py
|
13
|
import logging
import networkx
l = logging.getLogger("angr.pathprioritizer")
class PathPrioritizer(object):
def __init__(self, cfg, target):
self._cfg = cfg
self._target = target
self._shortest_path_length_dict = {}
self._construct()
def __getstate__(self):
state = {}
state['_shortest_path_length_dict'] = self._shortest_path_length_dict
return state
def _construct(self):
g = self._cfg.graph
bbl_dict = self._cfg.get_bbl_dict()
assert self._target in g
assert bbl_dict is not None
# Reverse the bbl_dict
bbl_key_map = {}
for k, v in bbl_dict.items():
bbl_key_map[v] = k
# Reverse it
# As SimIRSB is not copiable, we have to do it by ourselves
reversed_graph = networkx.DiGraph()
for a, b in g.edges():
reversed_graph.add_edge(b, a)
# Do a BFS from target, and save the length of shortest path to each
# basic block
shortest_path_length = networkx.single_source_shortest_path_length( \
reversed_graph, self._target)
for k, v in shortest_path_length.items():
bbl_key = bbl_key_map[k]
self._shortest_path_length_dict[bbl_key] = v
def get_priority(self, path):
MAX_INT = 0xffffffff
# Get a list of tuples
# Each tuple looks like (a, b), where b is the function address of a
# basic block, and a is the IRSB addr where the function is called
l.debug("Retrieving path priority of %s...", path)
call_stack = path.callstack
# FIXME: For now we are only supporting level 2 context-sensitivity
# But we shouldn't hard code this anyway
if len(call_stack) == 0:
tpl = (None, None, path.addr)
else:
tpl = call_stack[-1] + (path.addr,)
if tpl in self._shortest_path_length_dict:
priority = self._shortest_path_length_dict[tpl]
l.debug("The priority is %d", priority)
return priority
        else:
            l.debug("Not in our dict")
            return MAX_INT
|
yanivpas/choice
|
refs/heads/master
|
utils/sync/sync.py
|
1
|
import socket
from optparse import OptionParser
def client(options):
#create an INET, STREAMing socket
s = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
#now connect to the web server on port 80
# - the normal http port
s.connect((options.ip, int(options.port)))
def server(options):
#create an INET, STREAMing socket
serversocket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
#bind the socket to a public host,
# and a well-known port
serversocket.bind((socket.gethostname(), options.port))
#become a server socket
serversocket.listen(1)
#accept connections from outside
(clientsocket, address) = serversocket.accept()
def main():
parser = OptionParser()
parser.add_option("-l", "--listen", dest="listen",
help="Be a server",action="store_true", default=False)
parser.add_option("-p", "--port",
dest="port", default=7331,
help="The port")
parser.add_option("-i", "--ip",
dest="ip",
help="The ip to connect to")
(options, args) = parser.parse_args()
if options.listen:
server(options)
elif options.ip is not None:
client(options)
else:
raise Exception("You probably didn't spsify the ip")
if '__main__' == __name__:
main()
|
gopchandani/ryu
|
refs/heads/master
|
ryu/contrib/ovs/daemon.py
|
3
|
# Copyright (c) 2010, 2011 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import resource
import signal
import sys
import time
import ovs.dirs
import ovs.fatal_signal
#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog
vlog = ovs.vlog.Vlog("daemon")
# --detach: Should we run in the background?
_detach = False
# --pidfile: Name of pidfile (null if none).
_pidfile = None
# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None
# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False
# --no-chdir: Should we chdir to "/"?
_chdir = True
# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False
# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None
RESTART_EXIT_CODE = 5
def make_pidfile_name(name):
"""Returns the file name that would be used for a pidfile if 'name' were
provided to set_pidfile()."""
if name is None or name == "":
return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
else:
return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
def set_pidfile(name):
"""Sets up a following call to daemonize() to create a pidfile named
'name'. If 'name' begins with '/', then it is treated as an absolute path.
Otherwise, it is taken relative to ovs.util.RUNDIR, which is
$(prefix)/var/run by default.
If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
used."""
global _pidfile
_pidfile = make_pidfile_name(name)
def get_pidfile():
"""Returns an absolute path to the configured pidfile, or None if no
pidfile is configured."""
return _pidfile
def set_no_chdir():
"""Sets that we do not chdir to "/"."""
global _chdir
_chdir = False
def is_chdir_enabled():
"""Will we chdir to "/" as part of daemonizing?"""
return _chdir
def ignore_existing_pidfile():
"""Normally, daemonize() or daemonize_start() will terminate the program
with a message if a locked pidfile already exists. If this function is
called, an existing pidfile will be replaced, with a warning."""
global _overwrite_pidfile
_overwrite_pidfile = True
def set_detach():
"""Sets up a following call to daemonize() to detach from the foreground
session, running this process in the background."""
global _detach
_detach = True
def get_detach():
"""Will daemonize() really detach?"""
return _detach
def set_monitor():
"""Sets up a following call to daemonize() to fork a supervisory process to
monitor the daemon and restart it if it dies due to an error signal."""
global _monitor
_monitor = True
def _fatal(msg):
vlog.err(msg)
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def _make_pidfile():
"""If a pidfile has been configured, creates it and stores the running
process's pid in it. Ensures that the pidfile will be deleted when the
process exits."""
pid = os.getpid()
# Create a temporary pidfile.
tmpfile = "%s.tmp%d" % (_pidfile, pid)
ovs.fatal_signal.add_file_to_unlink(tmpfile)
try:
# This is global to keep Python from garbage-collecting and
# therefore closing our file after this function exits. That would
# unlock the lock for us, and we don't want that.
global file_handle
file_handle = open(tmpfile, "w")
except IOError as e:
_fatal("%s: create failed (%s)" % (tmpfile, e.strerror))
try:
s = os.fstat(file_handle.fileno())
except IOError as e:
_fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))
try:
file_handle.write("%s\n" % pid)
file_handle.flush()
except OSError as e:
_fatal("%s: write failed: %s" % (tmpfile, e.strerror))
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as e:
_fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))
# Rename or link it to the correct name.
if _overwrite_pidfile:
try:
os.rename(tmpfile, _pidfile)
except OSError as e:
_fatal("failed to rename \"%s\" to \"%s\" (%s)"
% (tmpfile, _pidfile, e.strerror))
else:
while True:
try:
os.link(tmpfile, _pidfile)
error = 0
except OSError as e:
error = e.errno
if error == errno.EEXIST:
_check_already_running()
elif error != errno.EINTR:
break
if error:
_fatal("failed to link \"%s\" as \"%s\" (%s)"
% (tmpfile, _pidfile, os.strerror(error)))
# Ensure that the pidfile will get deleted on exit.
ovs.fatal_signal.add_file_to_unlink(_pidfile)
# Delete the temporary pidfile if it still exists.
if not _overwrite_pidfile:
error = ovs.fatal_signal.unlink_file_now(tmpfile)
if error:
_fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error)))
global _pidfile_dev
global _pidfile_ino
_pidfile_dev = s.st_dev
_pidfile_ino = s.st_ino
def daemonize():
"""If configured with set_pidfile() or set_detach(), creates the pid file
and detaches from the foreground session."""
daemonize_start()
daemonize_complete()
def _waitpid(pid, options):
while True:
try:
return os.waitpid(pid, options)
        except OSError as e:
            if e.errno == errno.EINTR:
                continue
            return -e.errno, 0
def _fork_and_wait_for_startup():
try:
rfd, wfd = os.pipe()
except OSError as e:
sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
sys.exit(1)
try:
pid = os.fork()
except OSError as e:
sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
sys.exit(1)
if pid > 0:
# Running in parent process.
os.close(wfd)
ovs.fatal_signal.fork()
while True:
try:
s = os.read(rfd, 1)
error = 0
except OSError as e:
s = ""
error = e.errno
if error != errno.EINTR:
break
if len(s) != 1:
retval, status = _waitpid(pid, 0)
if retval == pid:
if os.WIFEXITED(status) and os.WEXITSTATUS(status):
# Child exited with an error. Convey the same error to
# our parent process as a courtesy.
sys.exit(os.WEXITSTATUS(status))
else:
sys.stderr.write("fork child failed to signal "
"startup (%s)\n"
% ovs.process.status_msg(status))
else:
assert retval < 0
sys.stderr.write("waitpid failed (%s)\n"
% os.strerror(-retval))
sys.exit(1)
os.close(rfd)
else:
        # Running in child process.
os.close(rfd)
ovs.timeval.postfork()
#ovs.lockfile.postfork()
global _daemonize_fd
_daemonize_fd = wfd
return pid
def _fork_notify_startup(fd):
if fd is not None:
error, bytes_written = ovs.socket_util.write_fully(fd, "0")
if error:
sys.stderr.write("could not write to pipe\n")
sys.exit(1)
os.close(fd)
def _should_restart(status):
global RESTART_EXIT_CODE
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
return True
if os.WIFSIGNALED(status):
for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
"SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
if os.WTERMSIG(status) == getattr(signal, signame, None):
return True
return False
def _monitor_daemon(daemon_pid):
# XXX should log daemon's stderr output at startup time
# XXX should use setproctitle module if available
last_restart = None
while True:
retval, status = _waitpid(daemon_pid, 0)
if retval < 0:
sys.stderr.write("waitpid failed\n")
sys.exit(1)
elif retval == daemon_pid:
status_msg = ("pid %d died, %s"
% (daemon_pid, ovs.process.status_msg(status)))
if _should_restart(status):
if os.WCOREDUMP(status):
# Disable further core dumps to save disk space.
try:
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except resource.error:
vlog.warn("failed to disable core dumps")
# Throttle restarts to no more than once every 10 seconds.
if (last_restart is not None and
ovs.timeval.msec() < last_restart + 10000):
vlog.warn("%s, waiting until 10 seconds since last "
"restart" % status_msg)
while True:
now = ovs.timeval.msec()
wakeup = last_restart + 10000
if now > wakeup:
break
print("sleep %f" % ((wakeup - now) / 1000.0))
time.sleep((wakeup - now) / 1000.0)
last_restart = ovs.timeval.msec()
vlog.err("%s, restarting" % status_msg)
daemon_pid = _fork_and_wait_for_startup()
if not daemon_pid:
break
else:
vlog.info("%s, exiting" % status_msg)
sys.exit(0)
# Running in new daemon process.
def _close_standard_fds():
"""Close stdin, stdout, stderr. If we're started from e.g. an SSH session,
then this keeps us from holding that session open artificially."""
null_fd = ovs.socket_util.get_null_fd()
if null_fd >= 0:
os.dup2(null_fd, 0)
os.dup2(null_fd, 1)
os.dup2(null_fd, 2)
def daemonize_start():
"""If daemonization is configured, then starts daemonization, by forking
and returning in the child process. The parent process hangs around until
the child lets it know either that it completed startup successfully (by
calling daemon_complete()) or that it failed to start up (by exiting with a
nonzero exit code)."""
if _detach:
if _fork_and_wait_for_startup() > 0:
# Running in parent process.
sys.exit(0)
# Running in daemon or monitor process.
if _monitor:
saved_daemonize_fd = _daemonize_fd
daemon_pid = _fork_and_wait_for_startup()
if daemon_pid > 0:
# Running in monitor process.
_fork_notify_startup(saved_daemonize_fd)
_close_standard_fds()
_monitor_daemon(daemon_pid)
# Running in daemon process
if _pidfile:
_make_pidfile()
def daemonize_complete():
"""If daemonization is configured, then this function notifies the parent
process that the child process has completed startup successfully."""
_fork_notify_startup(_daemonize_fd)
if _detach:
os.setsid()
if _chdir:
os.chdir("/")
_close_standard_fds()
def usage():
sys.stdout.write("""
Daemon options:
--detach run in background as daemon
--no-chdir do not chdir to '/'
--pidfile[=FILE] create pidfile (default: %s/%s.pid)
--overwrite-pidfile with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
def __read_pidfile(pidfile, delete_if_stale):
if _pidfile_dev is not None:
try:
s = os.stat(pidfile)
if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
# It's our own pidfile. We can't afford to open it,
# because closing *any* fd for a file that a process
# has locked also releases all the locks on that file.
#
# Fortunately, we know the associated pid anyhow.
return os.getpid()
except OSError:
pass
try:
file_handle = open(pidfile, "r+")
except IOError as e:
if e.errno == errno.ENOENT and delete_if_stale:
return 0
vlog.warn("%s: open: %s" % (pidfile, e.strerror))
return -e.errno
# Python fcntl doesn't directly support F_GETLK so we have to just try
# to lock it.
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
# pidfile exists but wasn't locked by anyone. Now we have the lock.
if not delete_if_stale:
file_handle.close()
vlog.warn("%s: pid file is stale" % pidfile)
return -errno.ESRCH
# Is the file we have locked still named 'pidfile'?
try:
raced = False
s = os.stat(pidfile)
s2 = os.fstat(file_handle.fileno())
if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
raced = True
except IOError:
raced = True
if raced:
vlog.warn("%s: lost race to delete pidfile" % pidfile)
return -errno.EALREADY
# We won the right to delete the stale pidfile.
try:
os.unlink(pidfile)
except IOError as e:
vlog.warn("%s: failed to delete stale pidfile (%s)"
% (pidfile, e.strerror))
return -e.errno
else:
vlog.dbg("%s: deleted stale pidfile" % pidfile)
file_handle.close()
return 0
except IOError as e:
if e.errno not in [errno.EACCES, errno.EAGAIN]:
vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
return -e.errno
# Someone else has the pidfile locked.
try:
try:
error = int(file_handle.readline())
except IOError as e:
vlog.warn("%s: read: %s" % (pidfile, e.strerror))
error = -e.errno
except ValueError:
vlog.warn("%s does not contain a pid" % pidfile)
error = -errno.EINVAL
return error
finally:
try:
file_handle.close()
except IOError:
pass
def read_pidfile(pidfile):
"""Opens and reads a PID from 'pidfile'. Returns the positive PID if
successful, otherwise a negative errno value."""
return __read_pidfile(pidfile, False)
def _check_already_running():
pid = __read_pidfile(_pidfile, True)
if pid > 0:
_fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
elif pid < 0:
_fatal("%s: pidfile check failed (%s), aborting"
% (_pidfile, os.strerror(pid)))
def add_args(parser):
"""Populates 'parser', an ArgumentParser allocated using the argparse
module, with the command line arguments required by the daemon module."""
pidfile = make_pidfile_name(None)
group = parser.add_argument_group(title="Daemon Options")
group.add_argument("--detach", action="store_true",
help="Run in background as a daemon.")
group.add_argument("--no-chdir", action="store_true",
help="Do not chdir to '/'.")
group.add_argument("--monitor", action="store_true",
help="Monitor %s process." % ovs.util.PROGRAM_NAME)
group.add_argument("--pidfile", nargs="?", const=pidfile,
help="Create pidfile (default %s)." % pidfile)
group.add_argument("--overwrite-pidfile", action="store_true",
help="With --pidfile, start even if already running.")
def handle_args(args):
"""Handles daemon module settings in 'args'. 'args' is an object
containing values parsed by the parse_args() method of ArgumentParser. The
parent ArgumentParser should have been prepared by add_args() before
calling parse_args()."""
if args.detach:
set_detach()
if args.no_chdir:
set_no_chdir()
if args.pidfile:
set_pidfile(args.pidfile)
if args.overwrite_pidfile:
ignore_existing_pidfile()
if args.monitor:
set_monitor()
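if __name__ == '__main__':
    # Minimal usage sketch (illustration only; this is not how the ovs/ryu
    # daemons are actually launched). A caller typically registers this
    # module's command-line options, applies them, and wraps its own startup
    # work between daemonize_start() and daemonize_complete().
    import argparse
    _parser = argparse.ArgumentParser()
    add_args(_parser)
    _args = _parser.parse_args()
    handle_args(_args)
    daemonize_start()      # with --detach, the parent waits here for the child
    # ... a real daemon would open its sockets and log files at this point ...
    daemonize_complete()   # child signals the parent, detaches and closes stdio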
|
LordDarkula/polypy
|
refs/heads/master
|
test/test_product.py
|
1
|
from polypy.base import x
def test_call():
f = 2 * x
assert f(2) == 4
f = 3 * x ** 2
assert f(3) == 27
def test_str():
f = 2 * x
assert str(f) == "2x"
f = x * 2
assert str(f) == "2x"
def test_square():
f = x
assert f * x == x ** 2
f = 3 * x
assert f ** 2 == 9 * x ** 2
def test_multiply_x_and_linear_term():
f = 2 * x
assert f * x == (2 * x ** 2)
def test_multiply_two_linear_terms():
assert (3 * x) * (2 * x) == 6 * x ** 2
def test_multiply_two_linear_expressions():
print((x + 1) * (x + 2))
assert (x + 1) * (x + 2) == (x**2 + 2*x + x + 2)
|
mcrowson/django
|
refs/heads/master
|
tests/flatpages_tests/test_models.py
|
342
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.flatpages.models import FlatPage
from django.test import SimpleTestCase
from django.test.utils import override_script_prefix
class FlatpageModelTests(SimpleTestCase):
def test_get_absolute_url_urlencodes(self):
pf = FlatPage(title="Café!", url='/café/')
self.assertEqual(pf.get_absolute_url(), '/caf%C3%A9/')
@override_script_prefix('/beverages/')
def test_get_absolute_url_honors_script_prefix(self):
pf = FlatPage(title="Tea!", url='/tea/')
self.assertEqual(pf.get_absolute_url(), '/beverages/tea/')
|
hgl888/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
tools/memory_inspector/memory_inspector/classification/results.py
|
108
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module owns the logic for classifying and aggregating data in buckets.
This complements the structure defined in the rules module. Symmetrically, the
aggregated results are organized in a bucket tree, which structure is identical
to the one of the corresponding rule tree.
The logic for aggregation is the following:
- The client loads a "rule tree" defined by the end-user (e.g., in a file) which
defines the final "shape" of the results.
- The rules define how to "match" a trace_record (e.g., a mmap line or a native
allocation) given some of its properties (e.g. the mapped file or the prot.
flags).
- The concrete classifier (which will use this module) knows how to count the
values for each trace_record (e.g. [Dirty KB, Clean KB, RSS KB] for mmaps).
Hence it decides the cardinality of the result nodes.
- The responsibility of this module is simply doing the math.
In essence, this module adds up the counters of each node whenever the
trace_record being pushed into the tree (through the AddToMatchingNodes method)
matches the corresponding rule.
It just does this math in a hierarchical fashion, following the shape of the tree.
A typical result tree looks like this (each node has two values in the example):
+----------------------+
| Total |
|----------------------|
+------------------+ (100, 1000) +--------------------+
| +----------+-----------+ |
| | |
+-----v-----+ +-----v-----+ +------v----+
| Foo | | Bar | |Total-other|
|-----------| |-----------| |-----------|
| (15, 100) | +---+ (80, 200) +-----+ | (5, 700) |
+-----------+ | +-----------+ | +-----------+
| |
+------v------+ +------v-----+
| Bar::Coffee | | Bar-other |
|-------------| |------------|
| (30, 120) | | (50, 80) |
+-------------+ +------------+
"""
from memory_inspector.classification import rules
class AggreatedResults(object):
"""A tree of results, where each node is a bucket (root: 'Total' bucket)."""
def __init__(self, rule_tree, keys):
"""Initializes the bucket tree using the structure of the rules tree.
Each node of the bucket tree is initialized with a list of len(keys) zeros.
"""
assert(isinstance(rule_tree, rules.Rule))
assert(isinstance(keys, list))
self.keys = keys
self.total = AggreatedResults._MakeBucketNodeFromRule(rule_tree, len(keys))
def AddToMatchingNodes(self, trace_record, values):
"""Adds the provided |values| to the nodes that match the |trace_record|.
Tree traversal logic: at any level, one and only one node will match the
|trace_record| (in the worst case it will be the catchall *-other rule).
When a node is matched, the traversal continues in its children and no
further siblings in the upper levels are visited anymore.
This is to guarantee that at any level the values of one node are equal to
the sum of the values of all its children.
Args:
trace_record: any kind of object which can be matched by the Match method
of the Rule object.
values: a list of int(s) which represent the value associated to the
matched trace_record. The cardinality of the list must be equal to the
cardinality of the initial keys.
"""
assert(len(values) == len(self.keys))
AggreatedResults._AddToMatchingNodes(
trace_record, values, self.total, len(self.keys))
@staticmethod
def _AddToMatchingNodes(trace_record, values, bucket, num_keys):
if not bucket.rule.Match(trace_record):
return False
for i in xrange(num_keys):
bucket.values[i] += values[i]
for child_bucket in bucket.children:
if AggreatedResults._AddToMatchingNodes(
trace_record, values, child_bucket, num_keys):
break
return True
@staticmethod
def _MakeBucketNodeFromRule(rule, num_keys):
assert(isinstance(rule, rules.Rule))
bucket = Bucket(rule, num_keys)
for child_rule in rule.children:
bucket.children.append(
AggreatedResults._MakeBucketNodeFromRule(child_rule, num_keys))
return bucket
class Bucket(object):
"""A bucket is a node in the results tree. """
def __init__(self, rule, num_keys):
self.rule = rule
self.values = [0] * num_keys
self.children = []
@property
def name(self):
return self.rule.name
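if __name__ == '__main__':
  # Self-contained illustration (not part of memory_inspector's API) of the
  # aggregation logic described in the module docstring: a record is pushed
  # down the tree and, at each level, exactly one sibling (at worst the
  # catch-all) absorbs its values. _DemoRule below only mimics the pieces of
  # rules.Rule that this sketch needs (name, children, Match); it is an
  # assumption made for the example, not the real class.
  class _DemoRule(object):
    def __init__(self, name, match_fn, children=None):
      self.name = name
      self.Match = match_fn
      self.children = children or []
  demo_tree = _DemoRule('Total', lambda rec: True, [
      _DemoRule('Foo', lambda rec: rec.startswith('foo')),
      _DemoRule('Bar', lambda rec: rec.startswith('bar')),
      _DemoRule('Total-other', lambda rec: True)])
  def demo_add(rule, record, values, totals):
    """Mirrors the matching logic of AggreatedResults for the demo tree."""
    if not rule.Match(record):
      return False
    totals[rule.name] = [a + b for a, b in
                         zip(totals.get(rule.name, [0] * len(values)), values)]
    for child in rule.children:
      if demo_add(child, record, values, totals):
        break
    return True
  demo_totals = {}
  for record, values in [('foo/lib.so', [15, 100]),
                         ('bar/cache', [80, 200]),
                         ('misc', [5, 700])]:
    demo_add(demo_tree, record, values, demo_totals)
  print(demo_totals)  # Total [100, 1000], Foo [15, 100], Bar [80, 200], ...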
|
bsmitty83/kernel_omap
|
refs/heads/test-tuna-3.4
|
tools/perf/scripts/python/net_dropmonitor.py
|
1258
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms[::-1]:
if loc >= i['loc']:
return (i['name'], loc - i['loc'])
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
alcherk/mal
|
refs/heads/master
|
rpython/step3_env.py
|
50
|
#import sys, traceback
import mal_readline
import mal_types as types
from mal_types import (MalSym, MalInt, MalStr,
_symbol, _keywordu,
MalList, _list, MalVector, MalHashMap, MalFunc)
import reader, printer
from env import Env
# read
def READ(str):
return reader.read_str(str)
# eval
def eval_ast(ast, env):
if types._symbol_Q(ast):
assert isinstance(ast, MalSym)
return env.get(ast)
elif types._list_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalList(res)
elif types._vector_Q(ast):
res = []
for a in ast.values:
res.append(EVAL(a, env))
return MalVector(res)
elif types._hash_map_Q(ast):
new_dct = {}
for k in ast.dct.keys():
new_dct[k] = EVAL(ast.dct[k], env)
return MalHashMap(new_dct)
else:
return ast # primitive value, return unchanged
def EVAL(ast, env):
#print("EVAL %s" % printer._pr_str(ast))
if not types._list_Q(ast):
return eval_ast(ast, env)
# apply list
if len(ast) == 0: return ast
a0 = ast[0]
if not isinstance(a0, MalSym):
raise Exception("attempt to apply on non-symbol")
if u"def!" == a0.value:
a1, a2 = ast[1], ast[2]
res = EVAL(a2, env)
return env.set(a1, res)
elif u"let*" == a0.value:
a1, a2 = ast[1], ast[2]
let_env = Env(env)
for i in range(0, len(a1), 2):
let_env.set(a1[i], EVAL(a1[i+1], let_env))
return EVAL(a2, let_env)
else:
el = eval_ast(ast, env)
f = el.values[0]
if isinstance(f, MalFunc):
return f.apply(el.values[1:])
else:
raise Exception("%s is not callable" % f)
# print
def PRINT(exp):
return printer._pr_str(exp)
# repl
repl_env = Env()
def REP(str, env):
return PRINT(EVAL(READ(str), env))
def plus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value+b.value)
def minus(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value-b.value)
def multiply(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(a.value*b.value)
def divide(args):
a, b = args[0], args[1]
assert isinstance(a, MalInt)
assert isinstance(b, MalInt)
return MalInt(int(a.value/b.value))
repl_env.set(_symbol(u'+'), MalFunc(plus))
repl_env.set(_symbol(u'-'), MalFunc(minus))
repl_env.set(_symbol(u'*'), MalFunc(multiply))
repl_env.set(_symbol(u'/'), MalFunc(divide))
def entry_point(argv):
while True:
try:
line = mal_readline.readline("user> ")
if line == "": continue
print(REP(line, repl_env))
except EOFError as e:
break
except reader.Blank:
continue
except types.MalException as e:
print(u"Error: %s" % printer._pr_str(e.object, False))
except Exception as e:
print("Error: %s" % e)
#print("".join(traceback.format_exception(*sys.exc_info())))
return 0
# _____ Define and setup target ___
def target(*args):
return entry_point
# Just run entry_point if not RPython compilation
import sys
if not sys.argv[0].endswith('rpython'):
entry_point(sys.argv)
|
sunqb/oa_qian
|
refs/heads/master
|
flask/Lib/site-packages/sqlalchemy/testing/assertsql.py
|
32
|
# testing/assertsql.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..engine.default import DefaultDialect
from .. import util
import re
class AssertRule(object):
def process_execute(self, clauseelement, *multiparams, **params):
pass
def process_cursor_execute(self, statement, parameters, context,
executemany):
pass
def is_consumed(self):
"""Return True if this rule has been consumed, False if not.
Should raise an AssertionError if this rule's condition has
definitely failed.
"""
raise NotImplementedError()
def rule_passed(self):
"""Return True if the last test of this rule passed, False if
failed, None if no test was applied."""
raise NotImplementedError()
def consume_final(self):
"""Return True if this rule has been consumed.
Should raise an AssertionError if this rule's condition has not
been consumed or has failed.
"""
if self._result is None:
assert False, 'Rule has not been consumed'
return self.is_consumed()
class SQLMatchRule(AssertRule):
def __init__(self):
self._result = None
self._errmsg = ""
def rule_passed(self):
return self._result
def is_consumed(self):
if self._result is None:
return False
assert self._result, self._errmsg
return True
class ExactSQL(SQLMatchRule):
def __init__(self, sql, params=None):
SQLMatchRule.__init__(self)
self.sql = sql
self.params = params
def process_cursor_execute(self, statement, parameters, context,
executemany):
if not context:
return
_received_statement = \
_process_engine_statement(context.unicode_statement,
context)
_received_parameters = context.compiled_parameters
# TODO: remove this step once all unit tests are migrated, as
# ExactSQL should really be *exact* SQL
sql = _process_assertion_statement(self.sql, context)
equivalent = _received_statement == sql
if self.params:
if util.callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
equivalent = equivalent and params \
== context.compiled_parameters
else:
params = {}
self._result = equivalent
if not self._result:
self._errmsg = (
'Testing for exact statement %r exact params %r, '
'received %r with params %r' %
(sql, params, _received_statement, _received_parameters))
class RegexSQL(SQLMatchRule):
def __init__(self, regex, params=None):
SQLMatchRule.__init__(self)
self.regex = re.compile(regex)
self.orig_regex = regex
self.params = params
def process_cursor_execute(self, statement, parameters, context,
executemany):
if not context:
return
_received_statement = \
_process_engine_statement(context.unicode_statement,
context)
_received_parameters = context.compiled_parameters
equivalent = bool(self.regex.match(_received_statement))
if self.params:
if util.callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
# do a positive compare only
for param, received in zip(params, _received_parameters):
for k, v in param.items():
if k not in received or received[k] != v:
equivalent = False
break
else:
params = {}
self._result = equivalent
if not self._result:
self._errmsg = \
'Testing for regex %r partial params %r, received %r '\
'with params %r' % (self.orig_regex, params,
_received_statement,
_received_parameters)
class CompiledSQL(SQLMatchRule):
def __init__(self, statement, params=None):
SQLMatchRule.__init__(self)
self.statement = statement
self.params = params
def process_cursor_execute(self, statement, parameters, context,
executemany):
if not context:
return
from sqlalchemy.schema import _DDLCompiles
_received_parameters = list(context.compiled_parameters)
# recompile from the context, using the default dialect
if isinstance(context.compiled.statement, _DDLCompiles):
compiled = \
context.compiled.statement.compile(dialect=DefaultDialect())
else:
compiled = (
context.compiled.statement.compile(
dialect=DefaultDialect(),
column_keys=context.compiled.column_keys)
)
_received_statement = re.sub(r'[\n\t]', '', str(compiled))
equivalent = self.statement == _received_statement
if self.params:
if util.callable(self.params):
params = self.params(context)
else:
params = self.params
if not isinstance(params, list):
params = [params]
else:
params = list(params)
all_params = list(params)
all_received = list(_received_parameters)
while params:
param = dict(params.pop(0))
for k, v in context.compiled.params.items():
param.setdefault(k, v)
if param not in _received_parameters:
equivalent = False
break
else:
_received_parameters.remove(param)
if _received_parameters:
equivalent = False
else:
params = {}
all_params = {}
all_received = []
self._result = equivalent
if not self._result:
print('Testing for compiled statement %r partial params '
'%r, received %r with params %r' %
(self.statement, all_params,
_received_statement, all_received))
self._errmsg = (
'Testing for compiled statement %r partial params %r, '
'received %r with params %r' %
(self.statement, all_params,
_received_statement, all_received))
# print self._errmsg
class CountStatements(AssertRule):
def __init__(self, count):
self.count = count
self._statement_count = 0
def process_execute(self, clauseelement, *multiparams, **params):
self._statement_count += 1
def process_cursor_execute(self, statement, parameters, context,
executemany):
pass
def is_consumed(self):
return False
def consume_final(self):
assert self.count == self._statement_count, \
'desired statement count %d does not match %d' \
% (self.count, self._statement_count)
return True
class AllOf(AssertRule):
def __init__(self, *rules):
self.rules = set(rules)
def process_execute(self, clauseelement, *multiparams, **params):
for rule in self.rules:
rule.process_execute(clauseelement, *multiparams, **params)
def process_cursor_execute(self, statement, parameters, context,
executemany):
for rule in self.rules:
rule.process_cursor_execute(statement, parameters, context,
executemany)
def is_consumed(self):
if not self.rules:
return True
for rule in list(self.rules):
if rule.rule_passed(): # a rule passed, move on
self.rules.remove(rule)
return len(self.rules) == 0
return False
def rule_passed(self):
return self.is_consumed()
def consume_final(self):
return len(self.rules) == 0
class Or(AllOf):
def __init__(self, *rules):
self.rules = set(rules)
self._consume_final = False
def is_consumed(self):
if not self.rules:
return True
for rule in list(self.rules):
if rule.rule_passed(): # a rule passed
self._consume_final = True
return True
return False
def consume_final(self):
assert self._consume_final, "Unsatisified rules remain"
def _process_engine_statement(query, context):
if util.jython:
# oracle+zxjdbc passes a PyStatement when returning into
query = str(query)
if context.engine.name == 'mssql' \
and query.endswith('; select scope_identity()'):
query = query[:-25]
query = re.sub(r'\n', '', query)
return query
def _process_assertion_statement(query, context):
paramstyle = context.dialect.paramstyle
if paramstyle == 'named':
pass
elif paramstyle == 'pyformat':
query = re.sub(r':([\w_]+)', r"%(\1)s", query)
else:
# positional params
repl = None
if paramstyle == 'qmark':
repl = "?"
elif paramstyle == 'format':
repl = r"%s"
elif paramstyle == 'numeric':
repl = None
query = re.sub(r':([\w_]+)', repl, query)
return query
class SQLAssert(object):
rules = None
def add_rules(self, rules):
self.rules = list(rules)
def statement_complete(self):
for rule in self.rules:
if not rule.consume_final():
assert False, \
'All statements are complete, but pending '\
'assertion rules remain'
def clear_rules(self):
del self.rules
def execute(self, conn, clauseelement, multiparams, params, result):
if self.rules is not None:
if not self.rules:
assert False, \
'All rules have been exhausted, but further '\
'statements remain'
rule = self.rules[0]
rule.process_execute(clauseelement, *multiparams, **params)
if rule.is_consumed():
self.rules.pop(0)
def cursor_execute(self, conn, cursor, statement, parameters,
context, executemany):
if self.rules:
rule = self.rules[0]
rule.process_cursor_execute(statement, parameters, context,
executemany)
asserter = SQLAssert()
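if __name__ == '__main__':
    # Tiny illustration (not how SQLAlchemy's test suite drives this module):
    # CountStatements only tallies process_execute() calls, so the
    # consume/assert protocol described in AssertRule's docstrings can be
    # shown without a real engine or execution context.
    demo_rule = CountStatements(2)
    demo_rule.process_execute(None)
    demo_rule.process_execute(None)
    assert demo_rule.consume_final()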
|
elmartinezinfo/three.js
|
refs/heads/master
|
utils/exporters/blender/2.65/scripts/addons/io_mesh_threejs/__init__.py
|
9
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# ################################################################
# Init
# ################################################################
bl_info = {
"name": "three.js format",
"author": "mrdoob, kikko, alteredq, remoe, pxf, n3tfr34k, crobi",
"version": (1, 5, 0),
"blender": (2, 7, 0),
"location": "File > Import-Export",
"description": "Import-Export three.js meshes",
"warning": "",
"wiki_url": "https://github.com/mrdoob/three.js/tree/master/utils/exporters/blender",
"tracker_url": "https://github.com/mrdoob/three.js/issues",
"category": "Import-Export"}
# To support reload properly, try to access a package var,
# if it's there, reload everything
import bpy
if "bpy" in locals():
import imp
if "export_threejs" in locals():
imp.reload(export_threejs)
if "import_threejs" in locals():
imp.reload(import_threejs)
from bpy.props import *
from bpy_extras.io_utils import ExportHelper, ImportHelper
# ################################################################
# Custom properties
# ################################################################
bpy.types.Object.THREE_castShadow = bpy.props.BoolProperty()
bpy.types.Object.THREE_receiveShadow = bpy.props.BoolProperty()
bpy.types.Object.THREE_doubleSided = bpy.props.BoolProperty()
bpy.types.Object.THREE_exportGeometry = bpy.props.BoolProperty(default = True)
bpy.types.Material.THREE_useVertexColors = bpy.props.BoolProperty()
bpy.types.Material.THREE_depthWrite = bpy.props.BoolProperty(default = True)
bpy.types.Material.THREE_depthTest = bpy.props.BoolProperty(default = True)
THREE_material_types = [("Basic", "Basic", "Basic"), ("Phong", "Phong", "Phong"), ("Lambert", "Lambert", "Lambert")]
bpy.types.Material.THREE_materialType = EnumProperty(name = "Material type", description = "Material type", items = THREE_material_types, default = "Lambert")
THREE_blending_types = [("NoBlending", "NoBlending", "NoBlending"), ("NormalBlending", "NormalBlending", "NormalBlending"),
("AdditiveBlending", "AdditiveBlending", "AdditiveBlending"), ("SubtractiveBlending", "SubtractiveBlending", "SubtractiveBlending"),
("MultiplyBlending", "MultiplyBlending", "MultiplyBlending"), ("AdditiveAlphaBlending", "AdditiveAlphaBlending", "AdditiveAlphaBlending")]
bpy.types.Material.THREE_blendingType = EnumProperty(name = "Blending type", description = "Blending type", items = THREE_blending_types, default = "NormalBlending")
class OBJECT_PT_hello( bpy.types.Panel ):
bl_label = "THREE"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "object"
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.label(text="Selected object: " + obj.name )
row = layout.row()
row.prop( obj, "THREE_exportGeometry", text="Export geometry" )
row = layout.row()
row.prop( obj, "THREE_castShadow", text="Casts shadow" )
row = layout.row()
row.prop( obj, "THREE_receiveShadow", text="Receives shadow" )
row = layout.row()
row.prop( obj, "THREE_doubleSided", text="Double sided" )
class MATERIAL_PT_hello( bpy.types.Panel ):
bl_label = "THREE"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "material"
def draw(self, context):
layout = self.layout
mat = context.material
row = layout.row()
row.label(text="Selected material: " + mat.name )
row = layout.row()
row.prop( mat, "THREE_materialType", text="Material type" )
row = layout.row()
row.prop( mat, "THREE_blendingType", text="Blending type" )
row = layout.row()
row.prop( mat, "THREE_useVertexColors", text="Use vertex colors" )
row = layout.row()
row.prop( mat, "THREE_depthWrite", text="Enable depth writing" )
row = layout.row()
row.prop( mat, "THREE_depthTest", text="Enable depth testing" )
# ################################################################
# Importer
# ################################################################
class ImportTHREEJS(bpy.types.Operator, ImportHelper):
'''Load a Three.js ASCII JSON model'''
bl_idname = "import.threejs"
bl_label = "Import Three.js"
filename_ext = ".js"
filter_glob = StringProperty(default="*.js", options={'HIDDEN'})
option_flip_yz = BoolProperty(name="Flip YZ", description="Flip YZ", default=True)
recalculate_normals = BoolProperty(name="Recalculate normals", description="Recalculate vertex normals", default=True)
option_worker = BoolProperty(name="Worker", description="Old format using workers", default=False)
def execute(self, context):
import io_mesh_threejs.import_threejs
return io_mesh_threejs.import_threejs.load(self, context, **self.properties)
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self.properties, "option_flip_yz")
row = layout.row()
row.prop(self.properties, "recalculate_normals")
row = layout.row()
row.prop(self.properties, "option_worker")
# ################################################################
# Exporter - settings
# ################################################################
SETTINGS_FILE_EXPORT = "threejs_settings_export.js"
import os
import json
def file_exists(filename):
"""Return true if file exists and accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_settings_fullpath():
return os.path.join(bpy.app.tempdir, SETTINGS_FILE_EXPORT)
def save_settings_export(properties):
settings = {
"option_export_scene" : properties.option_export_scene,
"option_embed_meshes" : properties.option_embed_meshes,
"option_url_base_html" : properties.option_url_base_html,
"option_copy_textures" : properties.option_copy_textures,
"option_lights" : properties.option_lights,
"option_cameras" : properties.option_cameras,
"option_animation_morph" : properties.option_animation_morph,
"option_animation_skeletal" : properties.option_animation_skeletal,
"option_frame_index_as_time" : properties.option_frame_index_as_time,
"option_frame_step" : properties.option_frame_step,
"option_all_meshes" : properties.option_all_meshes,
"option_flip_yz" : properties.option_flip_yz,
"option_materials" : properties.option_materials,
"option_normals" : properties.option_normals,
"option_colors" : properties.option_colors,
"option_uv_coords" : properties.option_uv_coords,
"option_faces" : properties.option_faces,
"option_vertices" : properties.option_vertices,
"option_skinning" : properties.option_skinning,
"option_bones" : properties.option_bones,
"option_vertices_truncate" : properties.option_vertices_truncate,
"option_scale" : properties.option_scale,
"align_model" : properties.align_model
}
fname = get_settings_fullpath()
f = open(fname, "w")
json.dump(settings, f)
def restore_settings_export(properties):
settings = {}
fname = get_settings_fullpath()
if file_exists(fname):
f = open(fname, "r")
settings = json.load(f)
properties.option_vertices = settings.get("option_vertices", True)
properties.option_vertices_truncate = settings.get("option_vertices_truncate", False)
properties.option_faces = settings.get("option_faces", True)
properties.option_normals = settings.get("option_normals", True)
properties.option_colors = settings.get("option_colors", True)
properties.option_uv_coords = settings.get("option_uv_coords", True)
properties.option_materials = settings.get("option_materials", True)
properties.option_skinning = settings.get("option_skinning", True)
properties.option_bones = settings.get("option_bones", True)
properties.align_model = settings.get("align_model", "None")
properties.option_scale = settings.get("option_scale", 1.0)
properties.option_flip_yz = settings.get("option_flip_yz", True)
properties.option_export_scene = settings.get("option_export_scene", False)
properties.option_embed_meshes = settings.get("option_embed_meshes", True)
properties.option_url_base_html = settings.get("option_url_base_html", False)
properties.option_copy_textures = settings.get("option_copy_textures", False)
properties.option_lights = settings.get("option_lights", False)
properties.option_cameras = settings.get("option_cameras", False)
properties.option_animation_morph = settings.get("option_animation_morph", False)
properties.option_animation_skeletal = settings.get("option_animation_skeletal", False)
properties.option_frame_index_as_time = settings.get("option_frame_index_as_time", False)
properties.option_frame_step = settings.get("option_frame_step", 1)
properties.option_all_meshes = settings.get("option_all_meshes", True)
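# Note on the two helpers above (explanatory comment, not in the original
# add-on): export options are persisted as a small JSON document named
# threejs_settings_export.js inside bpy.app.tempdir. save_settings_export()
# dumps the operator properties there, restore_settings_export() reads them
# back with per-key defaults, so a missing or partial settings file simply
# falls back to the defaults declared on the exporter operator.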
# ################################################################
# Exporter
# ################################################################
class ExportTHREEJS(bpy.types.Operator, ExportHelper):
'''Export selected object / scene for Three.js (ASCII JSON format).'''
bl_idname = "export.threejs"
bl_label = "Export Three.js"
filename_ext = ".js"
option_vertices = BoolProperty(name = "Vertices", description = "Export vertices", default = True)
option_vertices_deltas = BoolProperty(name = "Deltas", description = "Delta vertices", default = False)
option_vertices_truncate = BoolProperty(name = "Truncate", description = "Truncate vertices", default = False)
option_faces = BoolProperty(name = "Faces", description = "Export faces", default = True)
option_faces_deltas = BoolProperty(name = "Deltas", description = "Delta faces", default = False)
option_normals = BoolProperty(name = "Normals", description = "Export normals", default = True)
option_colors = BoolProperty(name = "Colors", description = "Export vertex colors", default = True)
option_uv_coords = BoolProperty(name = "UVs", description = "Export texture coordinates", default = True)
option_materials = BoolProperty(name = "Materials", description = "Export materials", default = True)
option_skinning = BoolProperty(name = "Skinning", description = "Export skin data", default = True)
option_bones = BoolProperty(name = "Bones", description = "Export bones", default = True)
align_types = [("None","None","None"), ("Center","Center","Center"), ("Bottom","Bottom","Bottom"), ("Top","Top","Top")]
align_model = EnumProperty(name = "Align model", description = "Align model", items = align_types, default = "None")
option_scale = FloatProperty(name = "Scale", description = "Scale vertices", min = 0.01, max = 1000.0, soft_min = 0.01, soft_max = 1000.0, default = 1.0)
option_flip_yz = BoolProperty(name = "Flip YZ", description = "Flip YZ", default = True)
option_export_scene = BoolProperty(name = "Scene", description = "Export scene", default = False)
option_embed_meshes = BoolProperty(name = "Embed meshes", description = "Embed meshes", default = True)
option_copy_textures = BoolProperty(name = "Copy textures", description = "Copy textures", default = False)
option_url_base_html = BoolProperty(name = "HTML as url base", description = "Use HTML as url base ", default = False)
option_lights = BoolProperty(name = "Lights", description = "Export default scene lights", default = False)
option_cameras = BoolProperty(name = "Cameras", description = "Export default scene cameras", default = False)
option_animation_morph = BoolProperty(name = "Morph animation", description = "Export animation (morphs)", default = False)
option_animation_skeletal = BoolProperty(name = "Skeletal animation", description = "Export animation (skeletal)", default = False)
option_frame_index_as_time = BoolProperty(name = "Frame index as time", description = "Use (original) frame index as frame time", default = False)
option_frame_step = IntProperty(name = "Frame step", description = "Animation frame step", min = 1, max = 1000, soft_min = 1, soft_max = 1000, default = 1)
option_all_meshes = BoolProperty(name = "All meshes", description = "All meshes (merged)", default = True)
def invoke(self, context, event):
restore_settings_export(self.properties)
return ExportHelper.invoke(self, context, event)
@classmethod
def poll(cls, context):
return context.active_object != None
def execute(self, context):
print("Selected: " + context.active_object.name)
if not self.properties.filepath:
raise Exception("filename not set")
save_settings_export(self.properties)
filepath = self.filepath
import io_mesh_threejs.export_threejs
return io_mesh_threejs.export_threejs.save(self, context, **self.properties)
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Geometry:")
row = layout.row()
row.prop(self.properties, "option_vertices")
# row = layout.row()
# row.enabled = self.properties.option_vertices
# row.prop(self.properties, "option_vertices_deltas")
row.prop(self.properties, "option_vertices_truncate")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_faces")
row = layout.row()
row.enabled = self.properties.option_faces
# row.prop(self.properties, "option_faces_deltas")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_normals")
layout.separator()
row = layout.row()
row.prop(self.properties, "option_bones")
row.prop(self.properties, "option_skinning")
layout.separator()
row = layout.row()
row.label(text="Materials:")
row = layout.row()
row.prop(self.properties, "option_uv_coords")
row.prop(self.properties, "option_colors")
row = layout.row()
row.prop(self.properties, "option_materials")
layout.separator()
row = layout.row()
row.label(text="Settings:")
row = layout.row()
row.prop(self.properties, "align_model")
row = layout.row()
row.prop(self.properties, "option_flip_yz")
row.prop(self.properties, "option_scale")
layout.separator()
row = layout.row()
row.label(text="--------- Experimental ---------")
layout.separator()
row = layout.row()
row.label(text="Scene:")
row = layout.row()
row.prop(self.properties, "option_export_scene")
row.prop(self.properties, "option_embed_meshes")
row = layout.row()
row.prop(self.properties, "option_lights")
row.prop(self.properties, "option_cameras")
layout.separator()
row = layout.row()
row.label(text="Animation:")
row = layout.row()
row.prop(self.properties, "option_animation_morph")
row = layout.row()
row.prop(self.properties, "option_animation_skeletal")
row = layout.row()
row.prop(self.properties, "option_frame_index_as_time")
row = layout.row()
row.prop(self.properties, "option_frame_step")
layout.separator()
row = layout.row()
row.label(text="Settings:")
row = layout.row()
row.prop(self.properties, "option_all_meshes")
row = layout.row()
row.prop(self.properties, "option_copy_textures")
row = layout.row()
row.prop(self.properties, "option_url_base_html")
layout.separator()
# ################################################################
# Common
# ################################################################
def menu_func_export(self, context):
default_path = bpy.data.filepath.replace(".blend", ".js")
self.layout.operator(ExportTHREEJS.bl_idname, text="Three.js (.js)").filepath = default_path
def menu_func_import(self, context):
self.layout.operator(ImportTHREEJS.bl_idname, text="Three.js (.js)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
|
pquerna/tls-client-hello-stats
|
refs/heads/master
|
third_party/dpkt/dpkt/ssl.py
|
1
|
# $Id: ssl.py 84 2012-08-24 18:44:00Z andrewflnr@gmail.com $
# Portion Copyright 2012 Google Inc. All rights reserved.
"""Secure Sockets Layer / Transport Layer Security."""
import dpkt
import ssl_ciphersuites
import struct
import binascii
import traceback
import datetime
#
# Note from April 2011: cde...@gmail.com added code that parses SSL3/TLS messages more in depth.
#
# Jul 2012: afleenor@google.com modified and extended SSL support further.
#
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
('msg', 's', ''),
('pad', 's', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
n = self.len = self.len & 0x3FFF
padlen = ord(self.data[0])
self.msg = self.data[1:1+n]
self.pad = self.data[1+n:1+n+padlen]
self.data = self.data[1+n+padlen:]
# SSLv3/TLS versions
SSL3_V = 0x0300
TLS1_V = 0x0301
TLS11_V = 0x0302
TLS12_V = 0x0303
ssl3_versions_str = {
SSL3_V: 'SSL3',
TLS1_V: 'TLS 1.0',
TLS11_V: 'TLS 1.1',
TLS12_V: 'TLS 1.2'
}
SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03'))
# Alert levels
SSL3_AD_WARNING = 1
SSL3_AD_FATAL = 2
alert_level_str = {
SSL3_AD_WARNING: 'SSL3_AD_WARNING',
SSL3_AD_FATAL: 'SSL3_AD_FATAL'
}
# SSL3 alert descriptions
SSL3_AD_CLOSE_NOTIFY = 0
SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
SSL3_AD_BAD_RECORD_MAC = 20 # fatal
SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
SSL3_AD_NO_CERTIFICATE = 41
SSL3_AD_BAD_CERTIFICATE = 42
SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
SSL3_AD_CERTIFICATE_REVOKED = 44
SSL3_AD_CERTIFICATE_EXPIRED = 45
SSL3_AD_CERTIFICATE_UNKNOWN = 46
SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
# TLS1 alert descriptions
TLS1_AD_DECRYPTION_FAILED = 21
TLS1_AD_RECORD_OVERFLOW = 22
TLS1_AD_UNKNOWN_CA = 48 # fatal
TLS1_AD_ACCESS_DENIED = 49 # fatal
TLS1_AD_DECODE_ERROR = 50 # fatal
TLS1_AD_DECRYPT_ERROR = 51
TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
TLS1_AD_PROTOCOL_VERSION = 70 # fatal
TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
TLS1_AD_INTERNAL_ERROR = 80 # fatal
TLS1_AD_USER_CANCELLED = 90
TLS1_AD_NO_RENEGOTIATION = 100
#/* codes 110-114 are from RFC3546 */
TLS1_AD_UNSUPPORTED_EXTENSION = 110
TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
TLS1_AD_UNRECOGNIZED_NAME = 112
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
# Mapping alert types to strings
alert_description_str = {
SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
}
# struct format strings for parsing buffer lengths
# don't forget, you have to pad a 3-byte value with \x00
_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
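# Worked example (added comment, not in the original source): struct has no
# 3-byte integer code, so a 3-byte length like '\x01\x02\x03' is left-padded
# with '\x00' and unpacked as a 4-byte '!I', giving
#   struct.unpack('!I', '\x00' + '\x01\x02\x03')[0] == 0x010203 == 66051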
def parse_variable_array(buf, lenbytes):
"""
    Parse an array described using the 'Type name<x..y>' syntax from the spec.
    Reads a length at the start of buf and returns that many bytes
    after it, in a tuple with the TOTAL bytes consumed (including the size). This
does not check that the array is the right length for any given datatype.
"""
# first have to figure out how to parse length
assert lenbytes <= 4 # pretty sure 4 is impossible, too
size_format = _SIZE_FORMATS[lenbytes - 1]
padding = '\x00' if lenbytes == 3 else ''
# read off the length
size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
# read the actual data
data = buf[lenbytes:lenbytes + size]
# if len(data) != size: insufficient data
return data, size + lenbytes
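# Usage sketch for parse_variable_array above (added for illustration, not part
# of the original dpkt source): a one-byte length prefix of 3 followed by the
# payload 'abc' parses as
#   parse_variable_array('\x03abcXYZ', 1)  ->  ('abc', 4)
# where 4 is the total number of bytes consumed (1 length byte + 3 data bytes)
# and the trailing 'XYZ' is left in the buffer for the caller.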
class SSL3Exception(Exception):
pass
class TLSRecord(dpkt.Packet):
"""
SSLv3 or TLSv1+ packet.
In addition to the fields specified in the header, there are
    compressed and encrypted fields, indicating whether, in the language
of the spec, this is a TLSPlaintext, TLSCompressed, or
TLSCiphertext. The application will have to figure out when it's
appropriate to change these values.
"""
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('length', 'H', 0),
)
def __init__(self, *args, **kwargs):
# assume plaintext unless specified otherwise in arguments
self.compressed = kwargs.pop('compressed', False)
self.encrypted = kwargs.pop('encrypted', False)
# parent constructor
dpkt.Packet.__init__(self, *args, **kwargs)
# make sure length and data are consistent
self.length = len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
header_length = self.__hdr_len__
self.data = buf[header_length:header_length+self.length]
# make sure buffer was long enough
if len(self.data) != self.length:
raise dpkt.NeedData('TLSRecord data was too short.')
# assume compressed and encrypted when it's been parsed from
# raw data
self.compressed = True
self.encrypted = True
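# Minimal usage sketch for TLSRecord above (illustrative only; it mirrors
# TLSRecordTest near the end of this file): a 5-byte header of type 23
# (application data), version TLS 1.0 and length 8, followed by the payload,
#   rec = TLSRecord('\x17\x03\x01\x00\x08abcdefgh')
# gives rec.type == 23, rec.version == 0x0301 and rec.data == 'abcdefgh';
# records parsed from raw bytes are assumed compressed and encrypted.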
class TLSChangeCipherSpec(dpkt.Packet):
"""
ChangeCipherSpec message is just a single byte with value 1
"""
__hdr__ = (('type', 'B', 1),)
class TLSAppData(str):
"""
As far as TLSRecord is concerned, AppData is just an opaque blob.
"""
pass
class TLSAlert(dpkt.Packet):
__hdr__ = (
('level', 'B', 1),
('description', 'B', 0),
)
class TLSHelloRequest(dpkt.Packet):
__hdr__ = tuple()
TLSExtensionTypes = {
0: 'server_name',
1: 'max_fragment_length',
2: 'client_certificate_url',
3: 'trusted_ca_keys',
4: 'truncated_hmac',
5: 'status_request',
6: 'user_mapping',
7: 'client_authz',
8: 'server_authz',
9: 'cert_type',
10: 'elliptic_curves',
11: 'ec_point_formats',
12: 'srp',
13: 'signature_algorithms',
14: 'use_srtp',
15: 'heartbeat',
35: 'session_tickets',
13172: 'next_protocol_negotiation',
65281: 'renegotiation_info',
}
class TLSExtension(object):
def __init__(self, extNumber, data):
self.data = data
self.value = extNumber
@property
def name(self):
return TLSExtensionTypes.get(self.value, 'unknown')
class TLSClientHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # the rest is variable-length and has to be done manually
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# now session, cipher suites, extensions are in self.data
self.session_id, pointer = parse_variable_array(self.data, 1)
# print 'pointer',pointer
# handle ciphersuites
ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
pointer += parsed
self.num_ciphersuites = len(ciphersuites) / 2
# check len(ciphersuites) % 2 == 0 ?
# compression methods
compression_methods, parsed = parse_variable_array(
self.data[pointer:], 1)
pointer += parsed
self.num_compression_methods = parsed - 1
self.compression_methods = map(ord, compression_methods)
self.extensions = []
if len(self.data[pointer:]) <= 0:
return
# skip total extensions length
pointer += 2
while len(self.data[pointer:]) > 0:
# extensions
extType = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
extension, extensionLength = parse_variable_array(self.data[pointer:], 2)
pointer += extensionLength
self.extensions.append(TLSExtension(extType, extension))
class TLSServerHello(dpkt.Packet):
__hdr__ = (
        ('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # session is variable, forcing rest to be manual
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
self.session_id, pointer = parse_variable_array(self.data, 1)
# single cipher suite
self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
# single compression method
self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0]
pointer += 1
# ignore extensions for now
except struct.error:
# probably data too short
raise dpkt.NeedData
class TLSUnknownHandshake(dpkt.Packet):
__hdr__ = tuple()
TLSCertificate = TLSUnknownHandshake
TLSServerKeyExchange = TLSUnknownHandshake
TLSCertificateRequest = TLSUnknownHandshake
TLSServerHelloDone = TLSUnknownHandshake
TLSCertificateVerify = TLSUnknownHandshake
TLSClientKeyExchange = TLSUnknownHandshake
TLSFinished = TLSUnknownHandshake
# mapping of handshake type ids to their names
# and the classes that implement them
HANDSHAKE_TYPES = {
0: ('HelloRequest', TLSHelloRequest),
1: ('ClientHello', TLSClientHello),
2: ('ServerHello', TLSServerHello),
11: ('Certificate', TLSCertificate),
12: ('ServerKeyExchange', TLSServerKeyExchange),
13: ('CertificateRequest', TLSCertificateRequest),
14: ('ServerHelloDone', TLSServerHelloDone),
15: ('CertificateVerify', TLSCertificateVerify),
16: ('ClientKeyExchange', TLSClientKeyExchange),
20: ('Finished', TLSFinished),
}
class TLSHandshake(dpkt.Packet):
'''
A TLS Handshake message
This goes for all messages encapsulated in the Record layer, but especially
important for handshakes and app data: A message may be spread across a
number of TLSRecords, in addition to the possibility of there being more
than one in a given Record. You have to put together the contents of
    TLSRecords yourself.
'''
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('type', 'B', 0),
('length_bytes', '3s', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Wait, might there be more than one message of self.type?
embedded_type = HANDSHAKE_TYPES.get(self.type, None)
if embedded_type is None:
raise SSL3Exception('Unknown or invalid handshake type %d' %
self.type)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
# get class out of embedded_type tuple
self.data = embedded_type[1](self.data)
@property
def length(self):
return struct.unpack('!I', '\x00' + self.length_bytes)[0]
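# Brief usage sketch for TLSHandshake above (illustrative, mirroring
# TLSHandshakeTest below): a HelloRequest with a 3-byte length of 1 and one
# payload byte,
#   hs = TLSHandshake('\x00\x00\x00\x01\xff')
# yields hs.type == 0, hs.length == 1 and hs.data as a TLSHelloRequest;
# a truncated buffer raises dpkt.NeedData instead.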
RECORD_TYPES = {
20: TLSChangeCipherSpec,
21: TLSAlert,
22: TLSHandshake,
23: TLSAppData,
}
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
if v in [ '\x03\x00', '\x03\x01', '\x03\x02' ]:
return SSL3(buf)
# SSL2 has no characteristic header or magic bytes, so we just assume
# that the msg is an SSL2 msg if it is not detected as SSL3+
return SSL2(buf)
def TLSMultiFactory(buf):
'''
    Attempt to parse one or more TLSRecords out of buf
Args:
buf: string containing SSL/TLS messages. May have an incomplete record
on the end
Returns:
[TLSRecord]
int, total bytes consumed, != len(buf) if an incomplete record was left at
the end.
Raises ...?
'''
if not buf:
return [], 0
v = buf[1:3]
if v in SSL3_VERSION_BYTES:
try:
msg = TLSRecord(buf)
parsed_bytes = len(msg) # len fn includes header length
except dpkt.NeedData:
return [], 0 # tell caller we parsed nothing
else:
raise SSL3Exception('Bad TLS version in buf: %r' % buf[:5])
later_messages, later_bytes = TLSMultiFactory(buf[len(msg):])
return [msg] + later_messages, parsed_bytes + later_bytes
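# Usage sketch for TLSMultiFactory above (illustrative; see TLSMultiFactoryTest
# below): given a buffer holding two complete application-data records plus a
# truncated third one,
#   msgs, consumed = TLSMultiFactory(buf)
# returns the two complete TLSRecord objects and consumed == 2 * (5 + 16),
# leaving the incomplete tail for the caller to buffer until more data arrives.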
import unittest
_hexdecode = binascii.a2b_hex
class TLSRecordTest(unittest.TestCase):
"""
Test basic TLSRecord functionality
For this test, the contents of the record doesn't matter, since we're not
parsing the next layer.
"""
def setUp(self):
# add some extra data, to make sure length is parsed correctly
self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
def testContentType(self):
self.assertEqual(self.p.type, 23)
def testVersion(self):
self.assertEqual(self.p.version, 0x0301)
def testLength(self):
self.assertEqual(self.p.length, 8)
def testData(self):
self.assertEqual(self.p.data, 'abcdefgh')
def testInitialFlags(self):
self.assertTrue(self.p.compressed)
self.assertTrue(self.p.encrypted)
def testRepack(self):
p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh')
self.assertEqual(p2.type, 23)
self.assertEqual(p2.version, 0x0301)
self.assertEqual(p2.length, 8)
self.assertEqual(p2.data, 'abcdefgh')
self.assertEqual(p2.pack(), self.p.pack())
def testTotalLength(self):
# that len(p) includes header
self.assertEqual(len(self.p), 13)
def testRaisesNeedDataWhenBufIsShort(self):
self.assertRaises(
dpkt.NeedData,
TLSRecord,
'\x16\x03\x01\x00\x10abc')
class TLSChangeCipherSpecTest(unittest.TestCase):
"It's just a byte. This will be quick, I promise"
def setUp(self):
self.p = TLSChangeCipherSpec('\x01')
def testParses(self):
self.assertEqual(self.p.type, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 1)
class TLSAppDataTest(unittest.TestCase):
"AppData is basically just a string"
def testValue(self):
d = TLSAppData('abcdefgh')
self.assertEqual(d, 'abcdefgh')
class TLSHandshakeTest(unittest.TestCase):
def setUp(self):
self.h = TLSHandshake('\x00\x00\x00\x01\xff')
def testCreatedInsideMessage(self):
self.assertTrue(isinstance(self.h.data, TLSHelloRequest))
def testLength(self):
self.assertEqual(self.h.length, 0x01)
def testRaisesNeedData(self):
self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01')
class ClientHelloTest(unittest.TestCase):
'This data is extracted from and verified by Wireshark'
def setUp(self):
self.data = _hexdecode(
"01000199" # handshake header
"0301" # version
"5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
"2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
# cipher suites
"005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001"
"0100" # compresssion methods
# extensions
"00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000"
"FFFFFFFFFFFFFFFF") # random garbage
self.p = TLSHandshake(self.data)
def testClientHelloConstructed(self):
'Make sure the correct class was constructed'
#print self.p
self.assertTrue(isinstance(self.p.data, TLSClientHello))
# def testClientDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testClientRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
def testCipherSuiteLength(self):
# we won't bother testing the identity of each cipher suite in the list.
self.assertEqual(self.p.data.num_ciphersuites, 42)
#self.assertEqual(len(self.p.ciphersuites), 42)
def testSessionId(self):
self.assertEqual(self.p.data.session_id,
_hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
def testCompressionMethods(self):
self.assertEqual(self.p.data.num_compression_methods, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 413)
class ServerHelloTest(unittest.TestCase):
'Again, from Wireshark'
def setUp(self):
self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
self.p = TLSHandshake(self.data)
def testConstructed(self):
self.assertTrue(isinstance(self.p.data, TLSServerHello))
# def testDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
def testCipherSuite(self):
self.assertEqual(self.p.data.cipher_suite.name, 'TLS_RSA_WITH_NULL_SHA')
def testTotalLength(self):
self.assertEqual(len(self.p), 81)
class TLSMultiFactoryTest(unittest.TestCase):
"Made up test data"
def setUp(self):
self.data = _hexdecode('1703010010' # header 1
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
'1703010010' # header 2
'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
'1703010010' # header 3
'CCCCCCCC') # data 3 (incomplete)
self.msgs, self.bytes_parsed = TLSMultiFactory(self.data)
def testNumMessages(self):
# only complete messages should be parsed, incomplete ones left
# in buffer
self.assertEqual(len(self.msgs), 2)
def testBytesParsed(self):
self.assertEqual(self.bytes_parsed, (5 + 16) * 2)
def testFirstMsgData(self):
self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16))
def testSecondMsgData(self):
self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16))
if __name__ == '__main__':
unittest.main()
|
draperjames/bokeh
|
refs/heads/master
|
examples/app/export_csv/main.py
|
4
|
from os.path import dirname, join
import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
source = ColumnDataSource(data=dict())
def update():
current = df[df['salary'] <= slider.value].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
button.callback = CustomJS(args=dict(source=source),
code=open(join(dirname(__file__), "download.js")).read())
columns = [
TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
TableColumn(field="years_experience", title="Experience (years)")
]
data_table = DataTable(source=source, columns=columns, width=800)
controls = widgetbox(slider, button)
table = widgetbox(data_table)
curdoc().add_root(row(controls, table))
curdoc().title = "Export CSV"
update()
|
strk/QGIS
|
refs/heads/master
|
tests/src/python/test_provider_memory.py
|
5
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the memory layer provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '2015-04-23'
__copyright__ = 'Copyright 2015, The QGIS Project'
from urllib.parse import parse_qs
from qgis.core import (
QgsField,
QgsFields,
QgsLayerDefinition,
QgsPointXY,
QgsReadWriteContext,
QgsVectorLayer,
QgsFeatureRequest,
QgsFeature,
QgsGeometry,
QgsWkbTypes,
NULL,
QgsMemoryProviderUtils,
QgsCoordinateReferenceSystem,
QgsRectangle,
QgsTestUtils,
QgsFeatureSource
)
from qgis.testing import (
start_app,
unittest
)
from utilities import (
unitTestDataPath,
compareWkt
)
from providertestbase import ProviderTestCase
from qgis.PyQt.QtCore import QVariant, QByteArray
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsMemoryProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def createLayer(cls):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
assert (vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200, NULL, 'NuLl', '5'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3'])
f3 = QgsFeature()
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1'])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2'])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4'])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
return vl
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
cls.vl = cls.createLayer()
assert (cls.vl.isValid())
cls.source = cls.vl.dataProvider()
# poly layer
cls.poly_vl = QgsVectorLayer('Polygon?crs=epsg:4326&field=pk:integer&key=pk',
'test', 'memory')
assert (cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([1])
f1.setGeometry(QgsGeometry.fromWkt('Polygon ((-69.03664108 81.35818902, -69.09237722 80.24346619, -73.718477 80.1319939, -73.718477 76.28620011, -74.88893598 76.34193625, -74.83319983 81.35818902, -69.03664108 81.35818902))'))
f2 = QgsFeature()
f2.setAttributes([2])
f2.setGeometry(QgsGeometry.fromWkt('Polygon ((-67.58750139 81.1909806, -66.30557012 81.24671674, -66.30557012 76.89929767, -67.58750139 76.89929767, -67.58750139 81.1909806))'))
f3 = QgsFeature()
f3.setAttributes([3])
f3.setGeometry(QgsGeometry.fromWkt('Polygon ((-68.36780737 75.78457483, -67.53176524 72.60761475, -68.64648808 73.66660144, -70.20710006 72.9420316, -68.36780737 75.78457483))'))
f4 = QgsFeature()
f4.setAttributes([4])
cls.poly_provider.addFeatures([f1, f2, f3, f4])
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def getEditableLayer(self):
return self.createLayer()
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testCtors(self):
testVectors = ["Point", "LineString", "Polygon", "MultiPoint", "MultiLineString", "MultiPolygon", "None"]
for v in testVectors:
layer = QgsVectorLayer(v, "test", "memory")
assert layer.isValid(), "Failed to create valid %s memory layer" % (v)
def testLayerGeometry(self):
testVectors = [("Point", QgsWkbTypes.PointGeometry, QgsWkbTypes.Point),
("LineString", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineString),
("Polygon", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.Polygon),
("MultiPoint", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPoint),
("MultiLineString", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineString),
("MultiPolygon", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygon),
("PointZ", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointZ),
("LineStringZ", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringZ),
("PolygonZ", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonZ),
("MultiPointZ", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointZ),
("MultiLineStringZ", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringZ),
("MultiPolygonZ", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonZ),
("PointM", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointM),
("LineStringM", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringM),
("PolygonM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonM),
("MultiPointM", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointM),
("MultiLineStringM", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringM),
("MultiPolygonM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonM),
("PointZM", QgsWkbTypes.PointGeometry, QgsWkbTypes.PointZM),
("LineStringZM", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineStringZM),
("PolygonZM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.PolygonZM),
("MultiPointZM", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPointZM),
("MultiLineStringZM", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineStringZM),
("MultiPolygonZM", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygonZM),
("Point25D", QgsWkbTypes.PointGeometry, QgsWkbTypes.Point25D),
("LineString25D", QgsWkbTypes.LineGeometry, QgsWkbTypes.LineString25D),
("Polygon25D", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.Polygon25D),
("MultiPoint25D", QgsWkbTypes.PointGeometry, QgsWkbTypes.MultiPoint25D),
("MultiLineString25D", QgsWkbTypes.LineGeometry, QgsWkbTypes.MultiLineString25D),
("MultiPolygon25D", QgsWkbTypes.PolygonGeometry, QgsWkbTypes.MultiPolygon25D),
("None", QgsWkbTypes.NullGeometry, QgsWkbTypes.NoGeometry)]
for v in testVectors:
layer = QgsVectorLayer(v[0], "test", "memory")
myMessage = ('Expected: %s\nGot: %s\n' %
(v[1], layer.geometryType()))
assert layer.geometryType() == v[1], myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(v[2], layer.wkbType()))
assert layer.wkbType() == v[2], myMessage
def testAddFeatures(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
res = provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
assert res, "Failed to add attributes"
myMessage = ('Expected: %s\nGot: %s\n' %
(3, len(provider.fields())))
assert len(provider.fields()) == 3, myMessage
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
res, t = provider.addFeatures([ft])
assert res, "Failed to add feature"
myMessage = ('Expected: %s\nGot: %s\n' %
(1, provider.featureCount()))
assert provider.featureCount() == 1, myMessage
for f in provider.getFeatures(QgsFeatureRequest()):
myMessage = ('Expected: %s\nGot: %s\n' %
("Johny", f[0]))
assert f[0] == "Johny", myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(20, f[1]))
assert f[1] == 20, myMessage
myMessage = ('Expected: %s\nGot: %s\n' %
(0.3, f[2]))
assert (f[2] - 0.3) < 0.0000001, myMessage
geom = f.geometry()
myMessage = ('Expected: %s\nGot: %s\n' %
("Point (10 10)", str(geom.asWkt())))
assert compareWkt(str(geom.asWkt()), "Point (10 10)"), myMessage
def testClone(self):
"""
Test that cloning a memory layer also clones features
"""
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:integer',
'test', 'memory')
self.assertTrue(vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200])
f2 = QgsFeature()
f2.setAttributes([3, 300])
f3 = QgsFeature()
f3.setAttributes([1, 100])
res, [f1, f2, f3] = vl.dataProvider().addFeatures([f1, f2, f3])
self.assertEqual(vl.featureCount(), 3)
vl2 = vl.clone()
self.assertEqual(vl2.featureCount(), 3)
features = [f for f in vl2.getFeatures()]
self.assertTrue([f for f in features if f['f1'] == 5])
self.assertTrue([f for f in features if f['f1'] == 3])
self.assertTrue([f for f in features if f['f1'] == 1])
def testGetFields(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
myMessage = ('Expected: %s\nGot: %s\n' %
(3, len(provider.fields())))
assert len(provider.fields()) == 3, myMessage
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
provider.addFeatures([ft])
for f in provider.getFeatures(QgsFeatureRequest()):
myMessage = ('Expected: %s\nGot: %s\n' %
("Johny", f['name']))
self.assertEqual(f["name"], "Johny", myMessage)
def testFromUri(self):
"""Test we can construct the mem provider from a uri"""
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
assert myMemoryLayer is not None, 'Provider not initialized'
myProvider = myMemoryLayer.dataProvider()
assert myProvider is not None
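    # Explanatory note (added comment, not part of the original test suite):
    # the memory provider URI is a query string appended to the geometry type,
    # e.g. 'Point?crs=epsg:4326&field=name:string(20)&field=age:integer&index=yes',
    # where each field=name:type(len,prec) entry declares an attribute and
    # index=yes requests a spatial index, as exercised by the tests below.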
def testLengthPrecisionFromUri(self):
"""Test we can assign length and precision from a uri"""
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=size:double(12,9)&index=yes'),
'test',
'memory')
self.assertEqual(myMemoryLayer.fields().field('size').length(), 12)
self.assertEqual(myMemoryLayer.fields().field('size').precision(), 9)
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=size:double(-1,-1)&index=yes'),
'test',
'memory')
self.assertEqual(myMemoryLayer.fields().field('size').length(), -1)
self.assertEqual(myMemoryLayer.fields().field('size').precision(), -1)
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=size:string(-1)&index=yes'),
'test',
'memory')
self.assertEqual(myMemoryLayer.fields().field('size').length(), -1)
def testFromUriWithEncodedField(self):
"""Test we can construct the mem provider from a uri when a field name is encoded"""
layer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=test%2Ffield:integer'),
'test',
'memory')
self.assertTrue(layer.isValid())
self.assertEqual([f.name() for f in layer.fields()], ['name', 'test/field'])
def testSaveFields(self):
# Create a new memory layer with no fields
myMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&index=yes'),
'test',
'memory')
# Add some fields to the layer
myFields = [QgsField('TestInt', QVariant.Int, 'integer', 2, 0),
QgsField('TestLong', QVariant.LongLong, 'long', -1, 0),
QgsField('TestDbl', QVariant.Double, 'double', 8, 6),
QgsField('TestString', QVariant.String, 'string', 50, 0),
QgsField('TestDate', QVariant.Date, 'date'),
QgsField('TestTime', QVariant.Time, 'time'),
QgsField('TestDateTime', QVariant.DateTime, 'datetime')]
assert myMemoryLayer.startEditing()
for f in myFields:
assert myMemoryLayer.addAttribute(f)
assert myMemoryLayer.commitChanges()
myMemoryLayer.updateFields()
# Export the layer to a layer-definition-XML
qlr = QgsLayerDefinition.exportLayerDefinitionLayers([myMemoryLayer], QgsReadWriteContext())
assert qlr is not None
# Import the layer from the layer-definition-XML
layers = QgsLayerDefinition.loadLayerDefinitionLayers(qlr, QgsReadWriteContext())
assert layers is not None
myImportedLayer = layers[0]
assert myImportedLayer is not None
# Check for the presence of the fields
importedFields = myImportedLayer.fields()
assert importedFields is not None
for f in myFields:
assert f == importedFields.field(f.name())
def testRenameAttributes(self):
layer = QgsVectorLayer("Point", "test", "memory")
provider = layer.dataProvider()
res = provider.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)])
layer.updateFields()
assert res, "Failed to add attributes"
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(["Johny",
20,
0.3])
res, t = provider.addFeatures([ft])
# bad rename
self.assertFalse(provider.renameAttributes({-1: 'not_a_field'}))
self.assertFalse(provider.renameAttributes({100: 'not_a_field'}))
# already exists
self.assertFalse(provider.renameAttributes({1: 'name'}))
# rename one field
self.assertTrue(provider.renameAttributes({1: 'this_is_the_new_age'}))
self.assertEqual(provider.fields().at(1).name(), 'this_is_the_new_age')
layer.updateFields()
fet = next(layer.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'this_is_the_new_age')
# rename two fields
self.assertTrue(provider.renameAttributes({1: 'mapinfo_is_the_stone_age', 2: 'super_size'}))
self.assertEqual(provider.fields().at(1).name(), 'mapinfo_is_the_stone_age')
self.assertEqual(provider.fields().at(2).name(), 'super_size')
layer.updateFields()
fet = next(layer.getFeatures())
self.assertEqual(fet.fields()[1].name(), 'mapinfo_is_the_stone_age')
self.assertEqual(fet.fields()[2].name(), 'super_size')
def testUniqueSource(self):
"""
Similar memory layers should have unique source - some code checks layer source to identify
matching layers
"""
layer = QgsVectorLayer("Point", "test", "memory")
layer2 = QgsVectorLayer("Point", "test2", "memory")
self.assertNotEqual(layer.source(), layer2.source())
def testCreateMemoryLayer(self):
"""
Test QgsMemoryProviderUtils.createMemoryLayer()
"""
# no fields
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields())
self.assertTrue(layer.isValid())
self.assertEqual(layer.name(), 'my name')
self.assertTrue(layer.fields().isEmpty())
# similar layers should have unique sources
layer2 = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields())
self.assertNotEqual(layer.source(), layer2.source())
# geometry type
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.Point)
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.PolygonZM)
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.PolygonZM)
# crs
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', QgsFields(), QgsWkbTypes.PolygonZM, QgsCoordinateReferenceSystem.fromEpsgId(3111))
self.assertTrue(layer.isValid())
self.assertEqual(layer.wkbType(), QgsWkbTypes.PolygonZM)
self.assertTrue(layer.crs().isValid())
self.assertEqual(layer.crs().authid(), 'EPSG:3111')
# fields
fields = QgsFields()
fields.append(QgsField("string", QVariant.String))
fields.append(QgsField("long", QVariant.LongLong))
fields.append(QgsField("double", QVariant.Double))
fields.append(QgsField("integer", QVariant.Int))
fields.append(QgsField("date", QVariant.Date))
fields.append(QgsField("datetime", QVariant.DateTime))
fields.append(QgsField("time", QVariant.Time))
fields.append(QgsField("#complex_name", QVariant.String))
fields.append(QgsField("complex/name", QVariant.String))
fields.append(QgsField("binaryfield", QVariant.ByteArray))
fields.append(QgsField("boolfield", QVariant.Bool))
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', fields)
self.assertTrue(layer.isValid())
self.assertFalse(layer.fields().isEmpty())
self.assertEqual(len(layer.fields()), len(fields))
for i in range(len(fields)):
self.assertEqual(layer.fields()[i].name(), fields[i].name())
self.assertEqual(layer.fields()[i].type(), fields[i].type())
self.assertEqual(layer.fields()[i].length(), fields[i].length())
self.assertEqual(layer.fields()[i].precision(), fields[i].precision(), fields[i].name())
# unsupported field type
fields = QgsFields()
fields.append(QgsField("rect", QVariant.RectF))
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', fields)
self.assertTrue(layer.isValid())
self.assertFalse(layer.fields().isEmpty())
self.assertEqual(layer.fields()[0].name(), 'rect')
self.assertEqual(layer.fields()[0].type(), QVariant.String) # should be mapped to string
# field precision
fields = QgsFields()
fields.append(QgsField("string", QVariant.String, len=10))
fields.append(QgsField("long", QVariant.LongLong, len=6))
fields.append(QgsField("double", QVariant.Double, len=10, prec=7))
fields.append(QgsField("double2", QVariant.Double, len=-1, prec=-1))
layer = QgsMemoryProviderUtils.createMemoryLayer('my name', fields)
self.assertTrue(layer.isValid())
self.assertFalse(layer.fields().isEmpty())
self.assertEqual(len(layer.fields()), len(fields))
for i in range(len(fields)):
self.assertEqual(layer.fields()[i].name(), fields[i].name())
self.assertEqual(layer.fields()[i].type(), fields[i].type())
self.assertEqual(layer.fields()[i].length(), fields[i].length())
self.assertEqual(layer.fields()[i].precision(), fields[i].precision())
def testThreadSafetyWithIndex(self):
layer = QgsVectorLayer('Point?crs=epsg:4326&index=yes&field=pk:integer&field=cnt:int8&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
provider = layer.dataProvider()
f = QgsFeature()
f.setAttributes([5, -200, NULL, 'NuLl', '5'])
f.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
for i in range(100000):
provider.addFeatures([f])
# filter rect request
extent = QgsRectangle(-73, 70, -63, 80)
request = QgsFeatureRequest().setFilterRect(extent)
self.assertTrue(QgsTestUtils.testProviderIteratorThreadSafety(self.source, request))
def testMinMaxCache(self):
"""
Test that min/max cache is appropriately cleared
:return:
"""
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:integer',
'test', 'memory')
self.assertTrue(vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200])
f2 = QgsFeature()
f2.setAttributes([3, 300])
f3 = QgsFeature()
f3.setAttributes([1, 100])
f4 = QgsFeature()
f4.setAttributes([2, 200])
f5 = QgsFeature()
f5.setAttributes([4, 400])
res, [f1, f2, f3, f4, f5] = vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# add feature
f6 = QgsFeature()
f6.setAttributes([15, 1400])
res, [f6] = vl.dataProvider().addFeatures([f6])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
f7 = QgsFeature()
f7.setAttributes([-1, -1400])
res, [f7] = vl.dataProvider().addFeatures([f7])
self.assertTrue(res)
self.assertEqual(vl.dataProvider().minimumValue(0), -1)
self.assertEqual(vl.dataProvider().minimumValue(1), -1400)
self.assertEqual(vl.dataProvider().maximumValue(0), 15)
self.assertEqual(vl.dataProvider().maximumValue(1), 1400)
# change attribute values
self.assertTrue(vl.dataProvider().changeAttributeValues({f6.id(): {0: 3, 1: 150}, f7.id(): {0: 4, 1: -100}}))
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -200)
self.assertEqual(vl.dataProvider().maximumValue(0), 5)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# delete features
self.assertTrue(vl.dataProvider().deleteFeatures([f4.id(), f1.id()]))
self.assertEqual(vl.dataProvider().minimumValue(0), 1)
self.assertEqual(vl.dataProvider().minimumValue(1), -100)
self.assertEqual(vl.dataProvider().maximumValue(0), 4)
self.assertEqual(vl.dataProvider().maximumValue(1), 400)
# delete attributes
self.assertTrue(vl.dataProvider().deleteAttributes([0]))
self.assertEqual(vl.dataProvider().minimumValue(0), -100)
self.assertEqual(vl.dataProvider().maximumValue(0), 400)
def testBinary(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:binary',
'test', 'memory')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
fields = dp.fields()
self.assertEqual([f.name() for f in fields], ['f1', 'f2'])
self.assertEqual([f.type() for f in fields], [QVariant.Int, QVariant.ByteArray])
self.assertEqual([f.typeName() for f in fields], ['integer', 'binary'])
f = QgsFeature(dp.fields())
bin_1 = b'xxx'
bin_val1 = QByteArray(bin_1)
f.setAttributes([1, bin_val1])
self.assertTrue(dp.addFeature(f))
f2 = [f for f in dp.getFeatures()][0]
self.assertEqual(f2.attributes(), [1, bin_val1])
# add binary field
self.assertTrue(dp.addAttributes([QgsField('binfield2', QVariant.ByteArray, 'Binary')]))
fields = dp.fields()
bin2_field = fields[fields.lookupField('binfield2')]
self.assertEqual(bin2_field.type(), QVariant.ByteArray)
self.assertEqual(bin2_field.typeName(), 'Binary')
f = QgsFeature(fields)
bin_2 = b'yyy'
bin_val2 = QByteArray(bin_2)
f.setAttributes([2, NULL, bin_val2])
self.assertTrue(dp.addFeature(f))
f1 = [f for f in dp.getFeatures()][0]
self.assertEqual(f1.attributes(), [1, bin_val1, NULL])
f2 = [f for f in dp.getFeatures()][1]
self.assertEqual(f2.attributes(), [2, NULL, bin_val2])
def testBool(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:bool',
'test', 'memory')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
fields = dp.fields()
self.assertEqual([f.name() for f in fields], ['f1', 'f2'])
self.assertEqual([f.type() for f in fields], [QVariant.Int, QVariant.Bool])
self.assertEqual([f.typeName() for f in fields], ['integer', 'boolean'])
f = QgsFeature(dp.fields())
f.setAttributes([1, True])
f2 = QgsFeature(dp.fields())
f2.setAttributes([2, False])
f3 = QgsFeature(dp.fields())
f3.setAttributes([3, NULL])
self.assertTrue(dp.addFeatures([f, f2, f3]))
self.assertEqual([f.attributes() for f in dp.getFeatures()], [[1, True], [2, False], [3, NULL]])
# add boolean field
self.assertTrue(dp.addAttributes([QgsField('boolfield2', QVariant.Bool, 'Boolean')]))
fields = dp.fields()
bool2_field = fields[fields.lookupField('boolfield2')]
self.assertEqual(bool2_field.type(), QVariant.Bool)
self.assertEqual(bool2_field.typeName(), 'Boolean')
f = QgsFeature(fields)
f.setAttributes([2, NULL, True])
self.assertTrue(dp.addFeature(f))
self.assertEqual([f.attributes() for f in dp.getFeatures()], [[1, True, NULL], [2, False, NULL], [3, NULL, NULL], [2, NULL, True]])
def testSpatialIndex(self):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=f1:integer&field=f2:bool',
'test', 'memory')
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexNotPresent)
vl.dataProvider().createSpatialIndex()
self.assertEqual(vl.hasSpatialIndex(), QgsFeatureSource.SpatialIndexPresent)
def testClone(self):
"""Test that a cloned layer has a single new id and
the same fields as the source layer"""
vl = QgsVectorLayer(
'Point?crs=epsg:4326',
'test', 'memory')
self.assertTrue(vl.isValid())
dp = vl.dataProvider()
self.assertTrue(dp.addAttributes([QgsField("name", QVariant.String),
QgsField("age", QVariant.Int),
QgsField("size", QVariant.Double)]))
vl2 = vl.clone()
self.assertTrue('memory?geometry=Point&crs=EPSG:4326&field=name:(0,0)&field=age:(0,0)&field=size:(0,0)' in vl2.publicSource())
self.assertEqual(len(parse_qs(vl.publicSource())['uid']), 1)
self.assertEqual(len(parse_qs(vl2.publicSource())['uid']), 1)
self.assertNotEqual(parse_qs(vl2.publicSource())['uid'][0], parse_qs(vl.publicSource())['uid'][0])
class TestPyQgsMemoryProviderIndexed(unittest.TestCase, ProviderTestCase):
"""Runs the provider test suite against an indexed memory layer"""
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
cls.vl = QgsVectorLayer('Point?crs=epsg:4326&index=yes&field=pk:integer&field=cnt:int8&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
'test', 'memory')
assert (cls.vl.isValid())
cls.source = cls.vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([5, -200, NULL, 'NuLl', '5'])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3'])
f3 = QgsFeature()
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1'])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2'])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4'])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
cls.source.addFeatures([f1, f2, f3, f4, f5])
# poly layer
cls.poly_vl = QgsVectorLayer('Polygon?crs=epsg:4326&index=yes&field=pk:integer&key=pk',
'test', 'memory')
assert (cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
f1 = QgsFeature()
f1.setAttributes([1])
f1.setGeometry(QgsGeometry.fromWkt('Polygon ((-69.0 81.4, -69.0 80.2, -73.7 80.2, -73.7 76.3, -74.9 76.3, -74.9 81.4, -69.0 81.4))'))
f2 = QgsFeature()
f2.setAttributes([2])
f2.setGeometry(QgsGeometry.fromWkt('Polygon ((-67.6 81.2, -66.3 81.2, -66.3 76.9, -67.6 76.9, -67.6 81.2))'))
f3 = QgsFeature()
f3.setAttributes([3])
f3.setGeometry(QgsGeometry.fromWkt('Polygon ((-68.4 75.8, -67.5 72.6, -68.6 73.7, -70.2 72.9, -68.4 75.8))'))
f4 = QgsFeature()
f4.setAttributes([4])
cls.poly_provider.addFeatures([f1, f2, f3, f4])
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this test for memory provider, as it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
if __name__ == '__main__':
unittest.main()
|
webdev1001/ansible
|
refs/heads/devel
|
v2/ansible/utils/debug.py
|
16
|
import os
import time
import sys
from multiprocessing import Lock
from ansible import constants as C
global_debug_lock = Lock()
def debug(msg):
if C.DEFAULT_DEBUG:
global_debug_lock.acquire()
print("%6d %0.5f: %s" % (os.getpid(), time.time(), msg))
sys.stdout.flush()
global_debug_lock.release()
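# Minimal usage sketch (an assumption, not part of the original module): C.DEFAULT_DEBUG
# must be enabled (e.g. via ANSIBLE_DEBUG=1) for anything to be printed, and the
# module-level lock keeps output from several worker processes from interleaving.
if __name__ == '__main__':
    debug("debug() example from pid %d" % os.getpid())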
|
Intel-tensorflow/tensorflow
|
refs/heads/master
|
tensorflow/examples/speech_commands/accuracy_utils.py
|
16
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for getting accuracy statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class StreamingAccuracyStats(object):
"""Get streaming accuracy statistics every time a new command is founded.
Attributes:
_how_many_gt: How many ground truths.
_how_many_gt_matched: How many ground truths have been matched.
_how_many_fp: How many commands have been fired as false positive.
_how_many_c: How many commands have been fired correctly.
_how_many_w: How many commands have been fired wrongly.
_gt_occurrence: A list recording which commands occur, and when, in the
input audio stream.
_previous_c: A variable to record the last status of _how_many_c.
_previous_w: A variable to record the last status of _how_many_w.
_previous_fp: A variable to record the last status of _how_many_fp.
"""
def __init__(self):
"""Init StreamingAccuracyStats with void or zero values."""
self._how_many_gt = 0
self._how_many_gt_matched = 0
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
self._gt_occurrence = []
self._previous_c = 0
self._previous_w = 0
self._previous_fp = 0
def read_ground_truth_file(self, file_name):
"""Load ground truth and timestamp pairs and store it in time order."""
with open(file_name, 'r') as f:
for line in f:
line_split = line.strip().split(',')
if len(line_split) != 2:
continue
timestamp = round(float(line_split[1]))
label = line_split[0]
self._gt_occurrence.append([label, timestamp])
self._gt_occurrence = sorted(self._gt_occurrence, key=lambda item: item[1])
def delta(self):
"""Compute delta of StreamingAccuracyStats against last status."""
fp_delta = self._how_many_fp - self._previous_fp
w_delta = self._how_many_w - self._previous_w
c_delta = self._how_many_c - self._previous_c
if fp_delta == 1:
recognition_state = '(False Positive)'
elif c_delta == 1:
recognition_state = '(Correct)'
elif w_delta == 1:
recognition_state = '(Wrong)'
else:
raise ValueError('Unexpected state in statistics')
# Update the previous status
self._previous_c = self._how_many_c
self._previous_w = self._how_many_w
self._previous_fp = self._how_many_fp
return recognition_state
def calculate_accuracy_stats(self, found_words, up_to_time_ms,
time_tolerance_ms):
"""Calculate accuracy statistics when a new commands is founded.
Given ground truth and corresponding predictions founded by
model, figure out how many were correct. Take a tolerance time, so that only
predictions up to a point in time are considered.
Args:
found_words: A list of all found commands up to now.
up_to_time_ms: End timestamp of this audio piece.
time_tolerance_ms: The tolerance milliseconds before and after
up_to_time_ms to match a ground truth.
"""
if up_to_time_ms == -1:
latest_possible_time = np.inf
else:
latest_possible_time = up_to_time_ms + time_tolerance_ms
self._how_many_gt = 0
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if ground_truth_time > latest_possible_time:
break
self._how_many_gt += 1
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
has_gt_matched = []
for found_word in found_words:
found_label = found_word[0]
found_time = found_word[1]
earliest_time = found_time - time_tolerance_ms
latest_time = found_time + time_tolerance_ms
has_matched_been_found = False
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if (ground_truth_time > latest_time or
ground_truth_time > latest_possible_time):
break
if ground_truth_time < earliest_time:
continue
ground_truth_label = ground_truth[0]
if (ground_truth_label == found_label and
has_gt_matched.count(ground_truth_time) == 0):
self._how_many_c += 1
else:
self._how_many_w += 1
has_gt_matched.append(ground_truth_time)
has_matched_been_found = True
break
if not has_matched_been_found:
self._how_many_fp += 1
self._how_many_gt_matched = len(has_gt_matched)
def print_accuracy_stats(self):
"""Write a human-readable description of the statistics to stdout."""
if self._how_many_gt == 0:
tf.compat.v1.logging.info('No ground truth yet, {} false positives'.format(
self._how_many_fp))
else:
any_match_percentage = self._how_many_gt_matched / self._how_many_gt * 100
correct_match_percentage = self._how_many_c / self._how_many_gt * 100
wrong_match_percentage = self._how_many_w / self._how_many_gt * 100
false_positive_percentage = self._how_many_fp / self._how_many_gt * 100
tf.compat.v1.logging.info(
'{:.1f}% matched, {:.1f}% correct, {:.1f}% wrong, '
'{:.1f}% false positive'.format(any_match_percentage,
correct_match_percentage,
wrong_match_percentage,
false_positive_percentage))
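# Illustrative driver (not part of the original module; the file name and word list
# below are assumptions): a streaming recognizer loop might feed the stats object so.
#
#   stats = StreamingAccuracyStats()
#   stats.read_ground_truth_file('/tmp/ground_truth.txt')   # rows of "label,timestamp_ms"
#   found_words = [['yes', 1200], ['no', 4700]]             # commands found so far
#   stats.calculate_accuracy_stats(found_words, up_to_time_ms=-1, time_tolerance_ms=750)
#   stats.print_accuracy_stats()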
|
agry/NGECore2
|
refs/heads/master
|
scripts/mobiles/endor/frenzied_marauder.py
|
2
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('frenzied_marauder')
mobileTemplate.setLevel(78)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("marauder")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_marauder_male_01.iff')
templates.add('object/mobile/shared_marauder_male_02.iff')
templates.add('object/mobile/shared_marauder_male_02.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/rifle/shared_rifle_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedshot')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('frenzied_marauder', mobileTemplate)
return
|
looooo/pivy
|
refs/heads/master
|
scons/scons-local-1.2.0.d20090919/SCons/Tool/gas.py
|
2
|
"""SCons.Tool.gas
Tool-specific initialization for as, the Gnu assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gas.py 4369 2009/09/19 15:58:29 scons"
as_module = __import__('as', globals(), locals(), [])
assemblers = ['as', 'gas']
def generate(env):
"""Add Builders and construction variables for as to an Environment."""
as_module.generate(env)
env['AS'] = env.Detect(assemblers) or 'as'
def exists(env):
return env.Detect(assemblers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Michael-Pizzileo/lichee-3.0.8-leaked
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
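# Sketch of typical add_stats() usage from a perf-script event handler (the event name
# and latency values below are illustrative, not taken from this file):
#
#   latencies = {}
#   add_stats(latencies, "sys_read", 1250)
#   add_stats(latencies, "sys_read", 980)
#   # latencies["sys_read"] is now (min, max, running_average, count)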
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
kouk/boto
|
refs/heads/develop
|
tests/integration/gs/util.py
|
101
|
# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import time
from boto.provider import Provider
_HAS_GOOGLE_CREDENTIALS = None
def has_google_credentials():
global _HAS_GOOGLE_CREDENTIALS
if _HAS_GOOGLE_CREDENTIALS is None:
provider = Provider('google')
if (provider.get_access_key() is None or
provider.get_secret_key() is None):
_HAS_GOOGLE_CREDENTIALS = False
else:
_HAS_GOOGLE_CREDENTIALS = True
return _HAS_GOOGLE_CREDENTIALS
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
Taken from:
https://github.com/saltycrane/retry-decorator
Licensed under BSD:
https://github.com/saltycrane/retry-decorator/blob/master/LICENSE
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
try_one_last_time = True
while mtries > 1:
try:
return f(*args, **kwargs)
try_one_last_time = False
break
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
if try_one_last_time:
return f(*args, **kwargs)
return
return f_retry # true decorator
return deco_retry
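# Hedged usage sketch (the decorated function and exception type are illustrative):
#
#   @retry(IOError, tries=4, delay=3, backoff=2)
#   def fetch_bucket():
#       return conn.get_bucket('my-test-bucket')
#
# Each failed attempt logs or prints a warning and sleeps, doubling the delay, and the
# final attempt is allowed to propagate the exception to the caller.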
|
aliakseis/LIII
|
refs/heads/master
|
src/3rdparty/torrent-rasterbar/tools/parse_memory_log.py
|
3
|
#!/usr/bin/env python
import os, sys, time
# usage: memory.log memory_index.log
lines = open(sys.argv[1], 'rb').readlines()
index = open(sys.argv[2], 'rb').readlines()
# logfile format:
# #<allocation-point> <time(ms)> <key ('A' | 'F')> <address> <size> <total-size> <total-space-time> <peak-total-size>
# example:
# #12 38 A 0xd902a0 16 16 0 16
allocation_points_to_print = 30
def print_allocation_point(ap):
print 'space_time: %d kBms' % (ap['spacetime'] / 1024)
print 'allocations: %d' % ap['allocations']
print 'peak: %d kB' % (ap['peak'] / 1024)
print 'stack: '
counter = 0
for e in ap['stack']:
print '#%d %s' % (counter, e)
counter += 1
allocation_points = []
for l in index:
l = l.split('#')
l.pop(0)
ap = { 'allocations': 0, 'peak': 0, 'spacetime': 0, 'allocation_point': len(allocation_points), 'stack': l}
allocation_points.append(ap);
for l in lines:
l = l.lstrip('#').rstrip('\n').split(' ')
if len(l) != 8:
print l
continue
try:
ap = int(l[0])
allocation_points[ap]['allocations'] += 1
allocation_points[ap]['peak'] = int(l[7])
allocation_points[ap]['spacetime'] = int(l[6])
except Exception, e:
print type(e), e, l
print '=== space time ==='
hot_ap = []
allocation_points.sort(key = lambda x:x['spacetime'], reverse=True);
counter = 0
for ap in allocation_points[0:allocation_points_to_print]:
print '== %d ==' % counter
counter += 1
print_allocation_point(ap)
hot_ap.append(ap['allocation_point']);
print '=== allocations ==='
allocation_points.sort(key = lambda x:x['allocations'], reverse=True);
for ap in allocation_points[0:allocation_points_to_print]:
print_allocation_point(ap)
print '=== peak ==='
allocation_points.sort(key = lambda x:x['peak'], reverse=True);
for ap in allocation_points[0:allocation_points_to_print]:
print_allocation_point(ap)
# generate graph
lines = open(sys.argv[1], 'rb').readlines()
out = open('memory.dat', 'wb')
cur_line = [0] * allocation_points_to_print
prev_line = [0] * allocation_points_to_print
last_time = 0
for l in lines:
l = l.lstrip('#').rstrip('\n').split(' ')
if len(l) != 8:
print l
continue
try:
time = int(l[1])
if time != last_time:
print >>out, last_time, '\t',
for i in range(allocation_points_to_print):
if cur_line[i] == -1:
print >>out, prev_line[i], '\t',
else:
print >>out, cur_line[i], '\t',
prev_line[i] = cur_line[i]
print >>out
cur_line = [-1] * allocation_points_to_print
last_time = time
size = int(l[5])
ap = int(l[0])
if ap in hot_ap:
index = hot_ap.index(ap)
cur_line[index] = max(cur_line[index], size)
except Exception, e:
print type(e), e, l
out.close()
out = open('memory.gnuplot', 'wb')
print >>out, "set term png size 1200,700"
print >>out, 'set output "memory.png"'
print >>out, 'set xrange [0:*]'
print >>out, 'set xlabel "time (ms)"'
print >>out, 'set ylabel "bytes (B)"'
print >>out, "set style data lines"
print >>out, "set key box"
print >>out, 'plot',
for k in range(allocation_points_to_print):
print >>out, ' "memory.dat" using 1:(',
for i in range(k, allocation_points_to_print):
if i == k: print >>out, '$%d' % (i + 2),
else: print >>out, '+$%d' % (i + 2),
print >>out, ') title "%d" with filledcurves x1, \\' % k
print >>out, 'x=0'
out.close()
os.system('gnuplot memory.gnuplot');
|
justajeffy/arsenalsuite
|
refs/heads/master
|
cpp/lib/epa/__init__.py
|
211
|
import build
|
kivy-garden/garden.geartick
|
refs/heads/master
|
__init__.py
|
1
|
from geartick import GearTick
|
DOTOCA/plugin.video.netflixbmc
|
refs/heads/master
|
resources/lib/pyasn1/type/constraint.py
|
382
|
#
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
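# Usage sketch (values are illustrative): a range constraint is built from a
# (start, stop) pair and invoked on a candidate value; out-of-range values raise
# error.ValueConstraintError via _testValue().
#
#   age = ValueRangeConstraint(0, 150)
#   age(42)    # accepted silently
#   age(200)   # raises error.ValueConstraintError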
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presense constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Value must not satisfy the single constraint"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
|
andrewleech/plugin.video.netflixbmc
|
refs/heads/master
|
resources/lib/pyasn1/type/constraint.py
|
382
|
#
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
raise error.PyAsn1Error(
'%s: screwed constraint values (start > stop): %s > %s' % (
self.__class__.__name__,
self.start, self.stop
)
)
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
l = len(value)
if l < self.start or l > self.stop:
raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy, meaning two op modes within a single constraint
class InnerTypeConstraint(AbstractConstraint):
"""Value must satisfy type and presense constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
if status == 'ABSENT': # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
"""Value must not satisfy the single constraint"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
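# Union semantics sketch (member constraints are illustrative): the first member that
# accepts the value short-circuits the loop; the union only raises when every member
# has raised error.ValueConstraintError.
#
#   flag = ConstraintsUnion(SingleValueConstraint(0), SingleValueConstraint(1))
#   flag(1)    # accepted
#   flag(2)    # raises error.ValueConstraintError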
# XXX
# add tests for type check
|
BaconPancakes/valor
|
refs/heads/master
|
lib/youtube_dl/downloader/dash.py
|
8
|
from __future__ import unicode_literals
import os
from .fragment import FragmentFD
from ..compat import compat_urllib_error
from ..utils import (
sanitize_open,
encodeFilename,
)
class DashSegmentsFD(FragmentFD):
"""
Download segments in a DASH manifest
"""
FD_NAME = 'dashsegments'
def real_download(self, filename, info_dict):
segments = info_dict['fragments'][:1] if self.params.get(
'test', False) else info_dict['fragments']
ctx = {
'filename': filename,
'total_frags': len(segments),
}
self._prepare_and_start_frag_download(ctx)
segments_filenames = []
fragment_retries = self.params.get('fragment_retries', 0)
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
def process_segment(segment, tmp_filename, num):
segment_url = segment['url']
segment_name = 'Frag%d' % num
target_filename = '%s-%s' % (tmp_filename, segment_name)
# In DASH, the first segment contains necessary headers to
# generate a valid MP4 file, so always abort for the first segment
fatal = num == 0 or not skip_unavailable_fragments
count = 0
while count <= fragment_retries:
try:
success = ctx['dl'].download(target_filename, {
'url': segment_url,
'http_headers': info_dict.get('http_headers'),
})
if not success:
return False
down, target_sanitized = sanitize_open(target_filename, 'rb')
ctx['dest_stream'].write(down.read())
down.close()
segments_filenames.append(target_sanitized)
break
except compat_urllib_error.HTTPError as err:
# YouTube may often return 404 HTTP error for a fragment causing the
# whole download to fail. However if the same fragment is immediately
# retried with the same request data this usually succeeds (1-2 attempts
# is usually enough), thus allowing the whole file to be downloaded successfully.
# To be future-proof we will retry all fragments that fail with any
# HTTP error.
count += 1
if count <= fragment_retries:
self.report_retry_fragment(err, segment_name, count, fragment_retries)
if count > fragment_retries:
if not fatal:
self.report_skip_fragment(segment_name)
return True
self.report_error('giving up after %s fragment retries' % fragment_retries)
return False
return True
for i, segment in enumerate(segments):
if not process_segment(segment, ctx['tmpfilename'], i):
return False
self._finish_frag_download(ctx)
for segment_file in segments_filenames:
os.remove(encodeFilename(segment_file))
return True
|
jshackley/darkstar
|
refs/heads/master
|
migrations/migrate.py
|
33
|
import MySQLdb
import re
import spell_blobs_to_spell_table
credentials = {}
db = None
cur = None
def connect():
print("Loading conf/map_darkstar.conf")
# Grab mysql credentials
filename = "../conf/map_darkstar.conf"
global credentials
global db
global cur
with open(filename) as f:
while True:
line = f.readline()
if not line: break
match = re.match(r"(mysql_\w+):\s+(\S+)", line)
if match:
credentials[match.group(1)] = match.group(2)
database = credentials["mysql_database"]
host = credentials["mysql_host"]
port = int(credentials["mysql_port"])
login = credentials["mysql_login"]
password = credentials["mysql_password"]
print(database, host, port, login, password)
db = MySQLdb.connect(host=host,
user=login,
passwd=password,
db=database,
port=port)
cur = db.cursor()
print("Connected to database " + database)
def close():
print("Closing connection...")
cur.close()
db.close()
def run_all_migrations():
connect()
run_migration(spell_blobs_to_spell_table)
print("Finished running all migrations")
close()
def run_migration(migration):
# Ensure things like new table exists
migration.check_preconditions(cur)
# Don't run migration twice
if not migration.needs_to_run(cur):
print("Already ran " + migration.migration_name() + " skipping...")
return
print("Running migrations for " + migration.migration_name())
migration.migrate(cur, db)
print("[Success] Done running " + migration.migration_name())
if __name__ == "__main__":
run_all_migrations()
|
HybridF5/nova
|
refs/heads/master
|
nova/api/openstack/compute/quota_sets.py
|
32
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
import webob
from nova.api.openstack.compute.schemas import quota_sets
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
from nova import quota
ALIAS = "os-quota-sets"
QUOTAS = quota.QUOTAS
authorize = extensions.os_compute_authorizer(ALIAS)
class QuotaSetsController(wsgi.Controller):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict."""
if project_id:
result = dict(id=str(project_id))
else:
result = {}
for resource in QUOTAS.resources:
if resource in quota_set:
result[resource] = quota_set[resource]
return dict(quota_set=result)
def _validate_quota_limit(self, resource, limit, minimum, maximum):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = (_("Quota limit %(limit)s for %(resource)s "
"must be -1 or greater.") %
{'limit': limit, 'resource': resource})
raise webob.exc.HTTPBadRequest(explanation=msg)
def conv_inf(value):
return float("inf") if value == -1 else value
if conv_inf(limit) < conv_inf(minimum):
msg = (_("Quota limit %(limit)s for %(resource)s must "
"be greater than or equal to already used and "
"reserved %(minimum)s.") %
{'limit': limit, 'resource': resource, 'minimum': minimum})
raise webob.exc.HTTPBadRequest(explanation=msg)
if conv_inf(limit) > conv_inf(maximum):
msg = (_("Quota limit %(limit)s for %(resource)s must be "
"less than or equal to %(maximum)s.") %
{'limit': limit, 'resource': resource, 'maximum': maximum})
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_quotas(self, context, id, user_id=None, usages=False):
if user_id:
values = QUOTAS.get_user_quotas(context, id, user_id,
usages=usages)
else:
values = QUOTAS.get_project_quotas(context, id, usages=usages)
if usages:
return values
else:
return {k: v['limit'] for k, v in values.items()}
@extensions.expected_errors(())
def show(self, req, id):
context = req.environ['nova.context']
authorize(context, action='show', target={'project_id': id})
params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
user_id = params.get('user_id', [None])[0]
return self._format_quota_set(id,
self._get_quotas(context, id, user_id=user_id))
@extensions.expected_errors(())
def detail(self, req, id):
context = req.environ['nova.context']
authorize(context, action='detail', target={'project_id': id})
user_id = req.GET.get('user_id', None)
return self._format_quota_set(id, self._get_quotas(context, id,
user_id=user_id,
usages=True))
@extensions.expected_errors(400)
@validation.schema(quota_sets.update)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context, action='update', target={'project_id': id})
project_id = id
params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
user_id = params.get('user_id', [None])[0]
quota_set = body['quota_set']
force_update = strutils.bool_from_string(quota_set.get('force',
'False'))
settable_quotas = QUOTAS.get_settable_quotas(context, project_id,
user_id=user_id)
# NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate
# min/max values and bail out if any of the items in the set is bad.
valid_quotas = {}
for key, value in six.iteritems(body['quota_set']):
if key == 'force' or (not value and value != 0):
continue
# validate whether already used and reserved exceeds the new
# quota, this check will be ignored if admin want to force
# update
value = int(value)
if not force_update:
minimum = settable_quotas[key]['minimum']
maximum = settable_quotas[key]['maximum']
self._validate_quota_limit(key, value, minimum, maximum)
valid_quotas[key] = value
# NOTE(dims): Pass #2 - At this point we know that all the
# values are correct and we can iterate and update them all in one
# shot without having to worry about rolling back etc as we have done
# the validation up front in the loop above.
for key, value in valid_quotas.items():
try:
objects.Quotas.create_limit(context, project_id,
key, value, user_id=user_id)
except exception.QuotaExists:
objects.Quotas.update_limit(context, project_id,
key, value, user_id=user_id)
# Note(gmann): Removed 'id' from update's response to make it same
# as V2. If needed it can be added with microversion.
return self._format_quota_set(None, self._get_quotas(context, id,
user_id=user_id))
@extensions.expected_errors(())
def defaults(self, req, id):
context = req.environ['nova.context']
authorize(context, action='defaults', target={'project_id': id})
values = QUOTAS.get_defaults(context)
return self._format_quota_set(id, values)
# TODO(oomichi): Here should be 204(No Content) instead of 202 by v2.1
# +microversions because the resource quota-set has been deleted completely
# when returning a response.
@extensions.expected_errors(())
@wsgi.response(202)
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context, action='delete', target={'project_id': id})
params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
user_id = params.get('user_id', [None])[0]
if user_id:
QUOTAS.destroy_all_by_project_and_user(context,
id, user_id)
else:
QUOTAS.destroy_all_by_project(context, id)
class QuotaSets(extensions.V21APIExtensionBase):
"""Quotas management support."""
name = "Quotas"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(ALIAS,
QuotaSetsController(),
member_actions={'defaults': 'GET',
'detail': 'GET'})
resources.append(res)
return resources
def get_controller_extensions(self):
return []
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/_backup_workload_items_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BackupWorkloadItemsOperations(object):
"""BackupWorkloadItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
vault_name, # type: str
resource_group_name, # type: str
fabric_name, # type: str
container_name, # type: str
filter=None, # type: Optional[str]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.WorkloadItemResourceList"]
"""Provides a pageable list of workload item of a specific container according to the query filter
and the pagination
parameters.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param container_name: Name of the container.
:type container_name: str
:param filter: OData filter options.
:type filter: str
:param skip_token: skipToken Filter.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkloadItemResourceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicesbackup.models.WorkloadItemResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkloadItemResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('WorkloadItemResourceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/items'} # type: ignore
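# Minimal usage sketch (client construction and argument values are assumptions, not
# part of this file): list() returns an ItemPaged, so callers simply iterate and the
# paging callbacks above fetch further pages on demand.
#
#   for item in client.backup_workload_items.list(
#           vault_name="myVault", resource_group_name="myRG",
#           fabric_name="Azure", container_name="VMAppContainer;..."):
#       print(item.name)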
|
ruuk/script.web.viewer2
|
refs/heads/master
|
lib/webviewer/cssutils/tests/test_cssutils.py
|
2
|
# -*- coding: utf-8 -*-
"""Testcases for cssutils.css.CSSCharsetRule"""
from __future__ import with_statement
import basetest
import codecs
import cssutils
import os
import sys
import tempfile
import xml.dom
try:
import mock
except ImportError:
mock = None
print "install mock library to run all tests"
class CSSutilsTestCase(basetest.BaseTestCase):
def setUp(self):
cssutils.ser.prefs.useDefaults()
def tearDown(self):
cssutils.ser.prefs.useDefaults()
exp = u'''@import "import/import2.css";
.import {
/* ./import.css */
background-image: url(images/example.gif)
}'''
def test_VERSION(self):
self.assertEqual('0.9.10', cssutils.VERSION)
def test_parseString(self):
"cssutils.parseString()"
s = cssutils.parseString(self.exp,
media='handheld, screen',
title='from string')
self.assert_(isinstance(s, cssutils.css.CSSStyleSheet))
self.assertEqual(None, s.href)
self.assertEqual(self.exp.encode(), s.cssText)
self.assertEqual(u'utf-8', s.encoding)
self.assertEqual(u'handheld, screen', s.media.mediaText)
self.assertEqual(u'from string', s.title)
self.assertEqual(self.exp.encode(), s.cssText)
ir = s.cssRules[0]
self.assertEqual('import/import2.css', ir.href)
irs = ir.styleSheet
self.assertEqual(cssutils.css.CSSStyleSheet, type(irs))
href = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'sheets', 'import.css')
href = cssutils.helper.path2url(href)
s = cssutils.parseString(self.exp,
href=href)
self.assertEqual(href, s.href)
ir = s.cssRules[0]
self.assertEqual('import/import2.css', ir.href)
irs = ir.styleSheet
self.assert_(isinstance(irs, cssutils.css.CSSStyleSheet))
self.assertEqual(irs.cssText, '@import "../import3.css";\n@import "import-impossible.css" print;\n.import2 {\n /* sheets/import2.css */\n background: url(http://example.com/images/example.gif);\n background: url(//example.com/images/example.gif);\n background: url(/images/example.gif);\n background: url(images2/example.gif);\n background: url(./images2/example.gif);\n background: url(../images/example.gif);\n background: url(./../images/example.gif)\n }'.encode())
tests = {
'a {color: red}': u'a {\n color: red\n }',
'a {color: rgb(1,2,3)}': u'a {\n color: rgb(1, 2, 3)\n }'
}
self.do_equal_p(tests)
def test_parseFile(self):
"cssutils.parseFile()"
# name if used with open, href used for @import resolving
name = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'sheets', 'import.css')
href = cssutils.helper.path2url(name)
s = cssutils.parseFile(name, href=href, media='screen', title='from file')
self.assert_(isinstance(s, cssutils.css.CSSStyleSheet))
if sys.platform.startswith('java'):
# on Jython only file:
self.assert_(s.href.startswith('file:'))
else:
# normally file:/// on win and file:/ on unix
self.assert_(s.href.startswith('file:/'))
self.assert_(s.href.endswith('/sheets/import.css'))
self.assertEqual(u'utf-8', s.encoding)
self.assertEqual(u'screen', s.media.mediaText)
self.assertEqual(u'from file', s.title)
self.assertEqual(self.exp.encode(), s.cssText)
ir = s.cssRules[0]
self.assertEqual('import/import2.css', ir.href)
irs = ir.styleSheet
self.assert_(isinstance(irs, cssutils.css.CSSStyleSheet))
self.assertEqual(irs.cssText, '@import "../import3.css";\n@import "import-impossible.css" print;\n.import2 {\n /* sheets/import2.css */\n background: url(http://example.com/images/example.gif);\n background: url(//example.com/images/example.gif);\n background: url(/images/example.gif);\n background: url(images2/example.gif);\n background: url(./images2/example.gif);\n background: url(../images/example.gif);\n background: url(./../images/example.gif)\n }'.encode())
# name is used for open and setting of href automatically
# test needs to be relative to this test file!
os.chdir(os.path.dirname(__file__))
name = os.path.join('..', '..', '..', 'sheets', 'import.css')
s = cssutils.parseFile(name, media='screen', title='from file')
self.assert_(isinstance(s, cssutils.css.CSSStyleSheet))
if sys.platform.startswith('java'):
# on Jython only file:
self.assert_(s.href.startswith('file:'))
else:
# normally file:/// on win and file:/ on unix
self.assert_(s.href.startswith('file:/'))
self.assert_(s.href.endswith('/sheets/import.css'))
self.assertEqual(u'utf-8', s.encoding)
self.assertEqual(u'screen', s.media.mediaText)
self.assertEqual(u'from file', s.title)
self.assertEqual(self.exp.encode(), s.cssText)
ir = s.cssRules[0]
self.assertEqual('import/import2.css', ir.href)
irs = ir.styleSheet
self.assert_(isinstance(irs, cssutils.css.CSSStyleSheet))
self.assertEqual(irs.cssText, '@import "../import3.css";\n@import "import-impossible.css" print;\n.import2 {\n /* sheets/import2.css */\n background: url(http://example.com/images/example.gif);\n background: url(//example.com/images/example.gif);\n background: url(/images/example.gif);\n background: url(images2/example.gif);\n background: url(./images2/example.gif);\n background: url(../images/example.gif);\n background: url(./../images/example.gif)\n }'.encode())
# next test
css = u'a:after { content: "羊蹄€\u2020" }'
fd, name = tempfile.mkstemp('_cssutilstest.css')
t = os.fdopen(fd, 'wb')
t.write(css.encode('utf-8'))
t.close()
self.assertRaises(UnicodeDecodeError, cssutils.parseFile, name, 'ascii')
# ???
s = cssutils.parseFile(name, encoding='iso-8859-1')
self.assertEqual(cssutils.css.CSSStyleSheet, type(s))
self.assertEqual(s.cssRules[1].selectorText, 'a:after')
s = cssutils.parseFile(name, encoding='utf-8')
self.assertEqual(cssutils.css.CSSStyleSheet, type(s))
self.assertEqual(s.cssRules[1].selectorText, 'a:after')
css = u'@charset "iso-8859-1"; a:after { content: "ä" }'
t = codecs.open(name, 'w', 'iso-8859-1')
t.write(css)
t.close()
self.assertRaises(
UnicodeDecodeError, cssutils.parseFile, name, 'ascii')
s = cssutils.parseFile(name, encoding='iso-8859-1')
self.assertEqual(cssutils.css.CSSStyleSheet, type(s))
self.assertEqual(s.cssRules[1].selectorText, 'a:after')
self.assertRaises(
UnicodeDecodeError, cssutils.parseFile, name, 'utf-8')
# clean up
try:
os.remove(name)
except OSError, e:
pass
def test_parseUrl(self):
"cssutils.parseUrl()"
href = os.path.join(os.path.dirname(__file__),
'..', '..', '..', 'sheets', 'import.css')
#href = u'file:' + urllib.pathname2url(href)
href = cssutils.helper.path2url(href)
#href = 'http://seewhatever.de/sheets/import.css'
s = cssutils.parseUrl(href,
media='tv, print',
title='from url')
self.assert_(isinstance(s, cssutils.css.CSSStyleSheet))
self.assertEqual(href, s.href)
self.assertEqual(self.exp.encode(), s.cssText)
self.assertEqual(u'utf-8', s.encoding)
self.assertEqual(u'tv, print', s.media.mediaText)
self.assertEqual('from url', s.title)
sr = s.cssRules[1]
img = sr.style.getProperty('background-image').propertyValue[0].value
self.assertEqual(img, 'images/example.gif')
ir = s.cssRules[0]
self.assertEqual(u'import/import2.css', ir.href)
irs = ir.styleSheet
self.assertEqual(irs.cssText, '@import "../import3.css";\n@import "import-impossible.css" print;\n.import2 {\n /* sheets/import2.css */\n background: url(http://example.com/images/example.gif);\n background: url(//example.com/images/example.gif);\n background: url(/images/example.gif);\n background: url(images2/example.gif);\n background: url(./images2/example.gif);\n background: url(../images/example.gif);\n background: url(./../images/example.gif)\n }'.encode())
ir2 = irs.cssRules[0]
self.assertEqual(u'../import3.css', ir2.href)
irs2 = ir2.styleSheet
self.assertEqual(irs2.cssText, '/* import3 */\n.import3 {\n /* from ./import/../import3.css */\n background: url(images/example3.gif);\n background: url(./images/example3.gif);\n background: url(import/images2/example2.gif);\n background: url(./import/images2/example2.gif);\n background: url(import/images2/../../images/example3.gif)\n }'.encode())
def test_setCSSSerializer(self):
"cssutils.setSerializer() and cssutils.ser"
s = cssutils.parseString('a { left: 0 }')
exp4 = '''a {
left: 0
}'''
exp1 = '''a {
left: 0
}'''
self.assertEqual(exp4.encode(), s.cssText)
newser = cssutils.CSSSerializer(cssutils.serialize.Preferences(indent=' '))
cssutils.setSerializer(newser)
self.assertEqual(exp1.encode(), s.cssText)
newser = cssutils.CSSSerializer(cssutils.serialize.Preferences(indent=' '))
cssutils.ser = newser
self.assertEqual(exp4.encode(), s.cssText)
def test_parseStyle(self):
"cssutils.parseStyle()"
s = cssutils.parseStyle('x:0; y:red')
self.assertEqual(type(s), cssutils.css.CSSStyleDeclaration)
self.assertEqual(s.cssText, u'x: 0;\ny: red')
s = cssutils.parseStyle('@import "x";')
self.assertEqual(type(s), cssutils.css.CSSStyleDeclaration)
self.assertEqual(s.cssText, u'')
tests = [
(u'content: "ä"', 'iso-8859-1'),
(u'content: "€"', 'utf-8')
]
for v, e in tests:
s = cssutils.parseStyle(v.encode(e), encoding=e)
self.assertEqual(s.cssText, v)
self.assertRaises(UnicodeDecodeError, cssutils.parseStyle,
u'content: "ä"'.encode('utf-8'), 'ascii')
def test_getUrls(self):
"cssutils.getUrls()"
cssutils.ser.prefs.keepAllProperties = True
css='''
@import "im1";
@import url(im2);
@import url( im3 );
@import url( "im4" );
@import url( 'im5' );
a {
background-image: url(a) !important;
background-\image: url(b);
background: url(c) no-repeat !important;
/* issue #46 */
src: local("xx"),
url("f.woff") format("woff"),
url("f.otf") format("opentype"),
url("f.svg#f") format("svg");
}'''
urls = set(cssutils.getUrls(cssutils.parseString(css)))
self.assertEqual(urls, set(["im1", "im2", "im3", "im4", "im5",
"a", "b", "c",
u'f.woff', u'f.svg#f', u'f.otf']))
cssutils.ser.prefs.keepAllProperties = False
def test_replaceUrls(self):
"cssutils.replaceUrls()"
cssutils.ser.prefs.keepAllProperties = True
css='''
@import "im1";
@import url(im2);
a {
background-image: url(c) !important;
background-\image: url(b);
background: url(a) no-repeat !important;
}'''
s = cssutils.parseString(css)
cssutils.replaceUrls(s, lambda old: "NEW" + old)
self.assertEqual(u'@import "NEWim1";', s.cssRules[0].cssText)
self.assertEqual(u'NEWim2', s.cssRules[1].href)
self.assertEqual(u'''background-image: url(NEWc) !important;
background-\\image: url(NEWb);
background: url(NEWa) no-repeat !important''', s.cssRules[2].style.cssText)
cssutils.ser.prefs.keepAllProperties = False
# CSSStyleDeclaration
style = cssutils.parseStyle(u'''color: red;
background-image:
url(1.png),
url('2.png')''')
cssutils.replaceUrls(style, lambda url: 'prefix/'+url)
self.assertEqual(style.cssText, u'''color: red;
background-image: url(prefix/1.png), url(prefix/2.png)''')
def test_resolveImports(self):
"cssutils.resolveImports(sheet)"
if mock:
self._tempSer()
cssutils.ser.prefs.useMinified()
a = u'@charset "iso-8859-1";@import"b.css";\xe4{color:green}'.encode('iso-8859-1')
b = u'@charset "ascii";\\E4 {color:red}'.encode('ascii')
# normal
m = mock.Mock()
with mock.patch('cssutils.util._defaultFetcher', m):
m.return_value = (None, b)
s = cssutils.parseString(a)
# py3 TODO
self.assertEqual(a, s.cssText)
self.assertEqual(b, s.cssRules[1].styleSheet.cssText)
c = cssutils.resolveImports(s)
# py3 TODO
self.assertEqual(u'\xc3\xa4{color:red}\xc3\xa4{color:green}'.encode('iso-8859-1'),
c.cssText)
c.encoding = 'ascii'
self.assertEqual(ur'@charset "ascii";\E4 {color:red}\E4 {color:green}'.encode(),
c.cssText)
# b cannot be found
m = mock.Mock()
with mock.patch('cssutils.util._defaultFetcher', m):
m.return_value = (None, None)
s = cssutils.parseString(a)
# py3 TODO
self.assertEqual(a, s.cssText)
self.assertEqual(cssutils.css.CSSStyleSheet,
type(s.cssRules[1].styleSheet))
c = cssutils.resolveImports(s)
# py3 TODO
self.assertEqual(u'@import"b.css";\xc3\xa4{color:green}'.encode('iso-8859-1'),
c.cssText)
# @import with media
a = u'@import"b.css";@import"b.css" print, tv ;@import"b.css" all;'
b = u'a {color: red}'
m = mock.Mock()
with mock.patch('cssutils.util._defaultFetcher', m):
m.return_value = (None, b)
s = cssutils.parseString(a)
c = cssutils.resolveImports(s)
self.assertEqual('a{color:red}@media print,tv{a{color:red}}a{color:red}'.encode(),
c.cssText)
# cannot resolve with media => keep original
a = u'@import"b.css"print;'
b = u'@namespace "http://example.com";'
m = mock.Mock()
with mock.patch('cssutils.util._defaultFetcher', m):
m.return_value = (None, b)
s = cssutils.parseString(a)
c = cssutils.resolveImports(s)
self.assertEqual(a.encode(), c.cssText)
# urls are adjusted too, layout:
# a.css
# c.css
# img/img.gif
# b/
# b.css
# subimg/subimg.gif
a = u'''
@import"b/b.css";
a {
x: url(/img/abs.gif);
y: url(img/img.gif);
z: url(b/subimg/subimg.gif);
}'''
def fetcher(url):
c = {
'b.css': u'''
@import"../c.css";
b {
x: url(/img/abs.gif);
y: url(../img/img.gif);
z: url(subimg/subimg.gif);
}''',
'c.css': u'''
c {
x: url(/img/abs.gif);
y: url(./img/img.gif);
z: url(./b/subimg/subimg.gif);
}'''
}
return 'utf-8', c[os.path.split(url)[1]]
@mock.patch.object(cssutils.util, '_defaultFetcher',
new=fetcher)
def do():
s = cssutils.parseString(a)
r = cssutils.resolveImports(s)
return s, r
s, r = do()
cssutils.ser.prefs.useDefaults()
cssutils.ser.prefs.keepComments = False
self.assertEqual(u'''c {
x: url(/img/abs.gif);
y: url(img/img.gif);
z: url(b/subimg/subimg.gif)
}
b {
x: url(/img/abs.gif);
y: url(img/img.gif);
z: url(b/subimg/subimg.gif)
}
a {
x: url(/img/abs.gif);
y: url(img/img.gif);
z: url(b/subimg/subimg.gif)
}'''.encode(), r.cssText)
cssutils.ser.prefs.useDefaults()
else:
self.assertEqual(False, u'Mock needed for this test')
if __name__ == '__main__':
import unittest
unittest.main()
|
Pantynopants/pyGraph
|
refs/heads/master
|
using_alg/TSP.py
|
1
|
from models import *
import utils
import pandas as pd
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
# get all shorest path of the map first
# using ford
# as a matrix, all point are arrivable
# http://python.jobbole.com/81457/
# http://blog.csdn.net/duzuokanyunqi1/article/details/46573429
# https://www.python.org/doc/essays/graphs/
# http://www.python-course.eu/graphs_python.php
# http://tfinley.net/software/pyglpk/ex_ham.html
@utils.get_total_dist
def dfs(graph, start= None, end = None):
visited = []
# print(len(graph.columns.values))
if start == None:
start = graph.columns.values[0]
stack = [start]
while stack:
vertex = stack.pop()
if end == vertex and len(visited) >= len(graph.columns.values)-2:
return visited
visited.append(vertex)
for i in utils.LocateVex(graph, vertex):
if i in visited or i in stack:
continue
stack.append(i)
print("faild")
return visited
# graph = {'A': {'B':1, 'C':2},
# 'B': {'A':1, 'D':2, 'E':3},
# }
def find_all_paths(graph, start, end, path=[]):
r"""
>>> find_all_paths(graph, 'A', 'D')
[['A', 'B', 'C', 'D'], ['A', 'B', 'D'], ['A', 'C', 'D']]
"""
path = path + [start]
if start == end:
return [path]
if not graph.has_key(start):
return []
paths = []
for node in graph[start].keys():
# print("IN FOR", node)
if node not in path:
newpaths = find_all_paths(graph, node, end, path)
for newpath in newpaths:
paths.append(newpath)
return paths
def test():
graph = {'A': {'B':1, 'C':2},
'B': {'C':1, 'D':2,},
'C': {'D':5},
'D': {'C':1},
'E': {'F':1},
'F': {'C':1}
}
dist = 32767 ##
min_path = []
for path in find_all_paths(graph, 'A', 'D'):
if dist > utils.get_distance(graph, path):
dist = utils.get_distance(graph, path)
min_path = path
print(dist)
print(min_path)
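# A self-contained variant of the lookup done in test() above -- an added
# illustration that does not rely on utils.get_distance; the graph literal
# below is hypothetical.
def _example_shortest_path():
    graph = {'A': {'B': 1, 'C': 2},
             'B': {'C': 1, 'D': 2},
             'C': {'D': 5},
             'D': {}}
    def path_cost(path):
        # sum the edge weights along consecutive vertices of the path
        return sum(graph[u][v] for u, v in zip(path, path[1:]))
    # brute force: enumerate every simple path and keep the cheapest one
    return min(find_all_paths(graph, 'A', 'D'), key=path_cost)  # ['A', 'B', 'D']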
def TSP(graph, start = None):
adj_poi_list = utils.LocateVex(graph, start)
result_path = []
result_dist = 0
if len(adj_poi_list) < 2:
print("bad start point")
return
else:
for poi in adj_poi_list:
print(poi)
dfs_visit, dist = dfs(graph, start= start, end = poi)
for i in dfs_visit:
print(i)
# print(j)
|
coolsvap/sos
|
refs/heads/master
|
sos/plugins/virsh.py
|
8
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
import glob
import os
class LibvirtClient(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
"""client for libvirt virtualization API
"""
plugin_name = 'virsh'
profiles = ('system', 'virt')
    packages = ('libvirt-client',)
def setup(self):
# virt-manager logs
if not self.get_option("all_logs"):
self.add_copy_spec_limit("/root/.virt-manager/*", sizelimit=5)
else:
self.add_copy_spec("/root/.virt-manager/*")
        # get list of VMs/domains
domains_file = self.get_cmd_output_now('virsh list --all')
        # cycle through the VMs/domains list, ignore the 2 header lines and the
        # trailing empty line, and dumpxml the domain name found in the 2nd column
if domains_file:
domains_lines = open(domains_file, "r").read().splitlines()[2:]
for domain in filter(lambda x: x, domains_lines):
self.add_cmd_output("virsh -r dumpxml %s" % domain.split()[1],
timeout=180)
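        # Illustration of the text being parsed above (hypothetical domains,
        # not output captured by this plugin):
        #
        #      Id    Name     State
        #     ----------------------------
        #      1     web01    running
        #      -     db01     shut off
        #
        # After dropping the two header lines, domain.split()[1] yields the
        # domain name ("web01", "db01") that is passed to "virsh -r dumpxml".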
# vim: et ts=4 sw=4
|
Zephor5/zspider
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# zspider documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 24 16:40:17 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.pngmath",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"zspider"
copyright = u"2015, Zephor Wu"
author = u"Zephor Wu"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"1.0"
# The full version, including alpha/beta/rc tags.
release = u"1.0.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s documentation" % project
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "zspiderdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "zspider.tex", u"zspider Documentation", u"Zephor Wu", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "zspider", u"zspider Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"zspider",
u"zspider Documentation",
author,
"zspider",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
jwren/intellij-community
|
refs/heads/master
|
python/testData/debug/test_multiprocess_process.py
|
12
|
import time
import multiprocessing
def run(name):
print(name)
if __name__ == '__main__':
multiprocessing.Process(target=run, args=("subprocess",)).start()
while True:
time.sleep(0.1)
|
squirrelo/qiime
|
refs/heads/master
|
scripts/print_qiime_config.py
|
15
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Dan Knights", "Antonio Gonzalez Pena",
"Justin Kuczynski", "Jai Ram Rideout", "Greg Caporaso",
"Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
import re
from os import access, X_OK, R_OK, W_OK, getenv, environ, remove, devnull
from os.path import isdir, exists, split, join
from sys import platform, version as python_version, executable, stdout
from unittest import TestLoader, TextTestRunner, TestCase
from shutil import rmtree
from subprocess import Popen, PIPE, STDOUT
from optparse import SUPPRESS_HELP
core_dependency_missing_msg = "See the QIIME Installation Guide: http://qiime.org/install/install.html"
try:
from numpy import __version__ as numpy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from scipy import __version__ as scipy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from tempfile import mkdtemp
from skbio.util import remove_files
from burrito.util import ApplicationNotFoundError, ApplicationError
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime.parse import parse_qiime_config_file
from qiime.util import (load_qiime_config,
get_qiime_project_dir,
get_qiime_library_version,
get_qiime_scripts_dir,
get_rdp_jarpath,
get_java_version,
get_pynast_version,
parse_command_line_parameters,
make_option,
qiime_system_call,
get_qiime_temp_dir)
from qiime.denoiser.utils import check_flowgram_ali_exe
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from biom import __version__ as biom_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qcli import __version__ as qcli_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pyqi import __version__ as pyqi_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime_default_reference import __version__ as qdr_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from skbio import __version__ as skbio_lib_version
from burrito.util import which
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pandas import __version__ as pandas_lib_version
except ImportError:
pandas_lib_version = "Not installed."
try:
from matplotlib import __version__ as matplotlib_lib_version
except ImportError:
matplotlib_lib_version = "Not installed."
try:
from emperor import __version__ as emperor_lib_version
except ImportError:
emperor_lib_version = "Not installed."
try:
from burrito import __version__ as burrito_lib_version
except ImportError:
burrito_lib_version = "Not installed."
# current release of bfillings doesn't have __version__. if it gets added in
# future releases, display that info, otherwise just indicate whether it's
# installed or not
try:
import bfillings
bfillings_lib_version = bfillings.__version__
except ImportError:
bfillings_lib_version = "Not installed."
except AttributeError:
bfillings_lib_version = "Installed."
# gdata doesn't have __version__ and adding that is outside of our control.
# just indicate whether it's installed or not
try:
import gdata
except ImportError:
gdata_installed = "Not installed."
else:
gdata_installed = "Installed."
try:
import h5py
h5py_lib_version = (
h5py.__version__ + ' (HDF5 version: %s)' % h5py.version.hdf5_version)
except ImportError:
h5py_lib_version = "Not installed."
pynast_lib_version = get_pynast_version()
if pynast_lib_version is None:
pynast_lib_version = "Not installed."
if which('sortmerna') is None:
sortmerna_lib_version = "Not installed."
else:
_, serr, _ = qiime_system_call("sortmerna --version")
sortmerna_lib_version = serr.strip()
if which('sumaclust') is None:
sumaclust_lib_version = "Not installed."
else:
sout, _, _ = qiime_system_call("sumaclust --help")
sout_lines = sout.split('\n')
sumaclust_lib_version = "Installed, but can't identify version."
for e in sout_lines:
e = e.strip()
if e.startswith('SUMACLUST Version'):
sumaclust_lib_version = e
break
if which('swarm') is None:
swarm_lib_version = "Not installed."
else:
_, serr, return_value = qiime_system_call("swarm --version")
serr = serr.strip()
if serr:
swarm_lib_version = serr.split('\n')[0]
else:
swarm_lib_version = "Installed, but can't identify version."
script_info = {}
script_info['brief_description'] = ("Print and optionally test QIIME "
"configuration details")
script_info['script_description'] = ("Print QIIME configuration details and "
"optionally perform tests of the QIIME "
"base or full install.")
script_info['script_usage'] = []
script_info['script_usage'].append(
("Example 1",
"Print basic QIIME configuration details:", """%prog"""))
script_info['script_usage'].append(
("Example 2",
"Print basic QIIME configuration details and test the base QIIME installation:",
"%prog -t"))
script_info['script_usage'].append(
("Example 3",
"Print basic QIIME configuration details and test the full QIIME installation:",
"%prog -tf"))
script_info['output_description'] = ("Prints QIIME configuration details to "
"standard output.")
script_info['version'] = __version__
script_info['help_on_no_arguments'] = False
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('-t', '--test', action='store_true', default=False,
help='Test the QIIME install and configuration '
'[default: %default]'),
make_option('-b', '--qiime_base_install', action='store_true',
default=True, help=SUPPRESS_HELP),
make_option('-f', '--qiime_full_install', action='store_true',
default=False, help='If passed, report on dependencies required for the '
'QIIME full install. To perform tests of the QIIME '
'full install, you must also pass -t. '
'[default: %default]'),
make_option('--haiku',
action='store_true',
default=False,
help=SUPPRESS_HELP)
]
class QIIMEConfig(TestCase):
def setUp(self):
self.config = load_qiime_config()
def test_cluster_jobs_fp(self):
"""cluster_jobs_fp is set to a valid path and is executable"""
fp = self.config["cluster_jobs_fp"]
if fp:
full_path = which(fp)
if full_path:
fp = full_path
# test if file exists or is in $PATH
self.assertTrue(exists(fp),
"cluster_jobs_fp set to an invalid file path or is not in $PATH: %s" % fp)
modes = {R_OK: "readable",
W_OK: "writable",
X_OK: "executable"}
            # test if file is executable
self.assertTrue(access(fp, X_OK),
"cluster_jobs_fp is not %s: %s" % (modes[X_OK], fp))
def test_blastmat_dir(self):
"""blastmat_dir is set to a valid path."""
test_qiime_config_variable("blastmat_dir", self.config, self)
def test_pynast_template_alignment_fp(self):
"""pynast_template_alignment, if set, is set to a valid path"""
test_qiime_config_variable("pynast_template_alignment_fp",
self.config, self)
def test_pynast_template_alignment_blastdb_fp(self):
"""pynast_template_alignment_blastdb, if set, is set to a valid path"""
test_qiime_config_variable("pynast_template_alignment_blastdb_fp",
self.config, self)
def test_get_qiime_scripts_dir(self):
"""Test that we can find the directory containing QIIME scripts."""
# get_qiime_scripts_dir will raise an error if it can't find a scripts
# directory.
scripts_dir = get_qiime_scripts_dir()
self.assertTrue(isdir(scripts_dir), "The QIIME scripts directory does "
"not exist: %s" % scripts_dir)
def test_temp_dir(self):
"""temp_dir is set to a valid path"""
temp_dir = get_qiime_temp_dir()
self.assertTrue(exists(temp_dir),
"temp_dir does not exist: %s" % temp_dir)
self.assertTrue(isdir(temp_dir),
"temp_dir is not a directory: %s" % temp_dir)
self.assertTrue(access(temp_dir, W_OK),
"temp_dir is not writable: %s" % temp_dir)
# we are not testing these values from the qiime_config:
# jobs_to_start 1
# seconds_to_sleep 60
def test_for_unrecognized_values(self):
"""qiime_config has no extra values"""
error_msg_fragment = (" contains unrecognized values:\n%s\nYou can "
"safely remove these values from your QIIME "
"config file as they will be ignored by QIIME.")
qiime_project_dir = get_qiime_project_dir()
orig_config = parse_qiime_config_file(open(qiime_project_dir +
'/qiime/support_files/qiime_config'))
# check the env qiime_config
qiime_config_env_filepath = getenv('QIIME_CONFIG_FP')
if qiime_config_env_filepath:
qiime_config_via_env = parse_qiime_config_file(
open(qiime_config_env_filepath))
extra_vals = []
for key in qiime_config_via_env:
if key not in orig_config:
extra_vals.append(key)
if extra_vals:
self.fail("The QIIME config file set via the QIIME_CONFIG_FP "
"environment variable" +
error_msg_fragment % ", ".join(extra_vals))
# check the qiime_config in $HOME/.qiime_config
home_dir = getenv('HOME')
if (exists(home_dir + "/.qiime_config")):
qiime_config_home = parse_qiime_config_file(
open(home_dir + "/.qiime_config"))
extra_vals = []
for key in qiime_config_home:
if key not in orig_config:
extra_vals.append(key)
if extra_vals:
self.fail("The .qiime_config in your HOME" +
error_msg_fragment % ", ".join(extra_vals))
class QIIMEDependencyBase(QIIMEConfig):
def test_uclust_supported_version(self):
"""uclust is in path and version is supported """
acceptable_version = (1, 2, 22)
self.assertTrue(which('uclust'),
"uclust not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = 'uclust --version'
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split('v')[-1].strip('q')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported uclust version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_FastTree_supported_version(self):
"""FastTree is in path and version is supported """
acceptable_version = (2, 1, 3)
self.assertTrue(which('FastTree'),
"FastTree not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
# If FastTree is run interactively, it outputs the following line:
# Usage for FastTree version 2.1.3 SSE3:
#
# If run non-interactively:
# FastTree Version 2.1.3 SSE3
command = "FastTree 2>&1 > %s | grep -i version" % devnull
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read().strip()
version_str_matches = re.findall('ersion\s+(\S+)\s+', stdout)
self.assertEqual(len(version_str_matches), 1,
"Could not find FastTree version info in usage text "
"'%s'." % stdout)
version_str = version_str_matches[0]
try:
version = tuple(map(int, version_str.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
acceptable_version_str = '.'.join(map(str, acceptable_version))
self.assertTrue(pass_test,
"Unsupported FastTree version. %s is required, but "
"running %s." % (acceptable_version_str, version_str))
class QIIMEDependencyFull(QIIMEDependencyBase):
def test_ampliconnoise_install(self):
""" AmpliconNoise install looks sane."""
url = "http://qiime.org/install/install.html#ampliconnoise-install-notes"
pyro_lookup_file = getenv('PYRO_LOOKUP_FILE')
self.assertTrue(pyro_lookup_file is not None,
"$PYRO_LOOKUP_FILE variable is not set. See %s for help." % url)
self.assertTrue(exists(pyro_lookup_file),
"$PYRO_LOOKUP_FILE variable is not set to an existing filepath.")
seq_lookup_file = getenv('SEQ_LOOKUP_FILE')
self.assertTrue(seq_lookup_file is not None,
"$SEQ_LOOKUP_FILE variable is not set. See %s for help." % url)
self.assertTrue(exists(seq_lookup_file),
"$SEQ_LOOKUP_FILE variable is not set to an existing filepath.")
self.assertTrue(which("SplitKeys.pl"),
"Couldn't find SplitKeys.pl. " +
"Perhaps AmpliconNoise Scripts directory isn't in $PATH?" +
" See %s for help." % url)
self.assertTrue(which("FCluster"),
"Couldn't find FCluster. " +
"Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
" See %s for help." % url)
self.assertTrue(which("Perseus"),
"Couldn't find Perseus. " +
"Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
" See %s for help." % url)
def test_sourcetracker_installed(self):
"""sourcetracker is installed"""
sourcetracker_path = getenv('SOURCETRACKER_PATH')
self.assertNotEqual(sourcetracker_path, None,
("SOURCETRACKER_PATH is not set. This is "
"only important if you plan to use SourceTracker."))
self.assertTrue(exists(sourcetracker_path),
"SOURCETRACKER_PATH is not set to a valid path: %s" %
sourcetracker_path)
def test_chimeraSlayer_install(self):
"""no obvious problems with ChimeraSlayer install """
        # The ChimeraSlayer app requires that all its components are installed
# relative to the main program ChimeraSlayer.pl.
# We therefore check that at least one the files is there.
# However, if the directory structure of ChimeraSlayer changes, this test will most
# likely fail as well and need to be updated.
# Tested with the version of microbiomeutil_2010-04-29
chim_slay = which("ChimeraSlayer.pl")
self.assertTrue(chim_slay, "ChimeraSlayer was not found in your $PATH")
dir, app_name = split(chim_slay)
self.assertTrue(
exists(dir + "/ChimeraParentSelector/chimeraParentSelector.pl"),
"ChimeraSlayer depends on external files in directoryies relative to its "
"install directory. These do not appear to be present.")
def test_blastall_fp(self):
"""blastall_fp is set to a valid path"""
blastall = self.config["blastall_fp"]
if not self.config["blastall_fp"].startswith("/"):
# path is relative, figure out absolute path
blast_all = which(blastall)
if not blast_all:
raise ApplicationNotFoundError(
"blastall_fp set to %s, but is not in your PATH. Either use an absolute path to or put it in your PATH." %
blastall)
self.config["blastall_fp"] = blast_all
test_qiime_config_variable("blastall_fp", self.config, self, X_OK)
def test_blast_supported_version(self):
"""blast is in path and version is supported """
acceptable_version = (2, 2, 22)
self.assertTrue(which('blastall'),
"blast not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = 'blastall | grep blastall'
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[1].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported blast version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_cdbtools_supported_version(self):
"""cdbtools is in path and version is supported """
acceptable_version = (0, 99)
self.assertTrue(which('cdbfasta'),
"cdbtools not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "cdbfasta -v"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported cdbtools version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_INFERNAL_supported_version(self):
"""INFERNAL is in path and version is supported """
acceptable_version = (1, 0, 2)
self.assertTrue(which('cmbuild'),
"Infernal not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "cmbuild -h | grep INF"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported INFERNAL version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_muscle_supported_version(self):
"""muscle is in path and version is supported """
acceptable_version = (3, 8, 31)
self.assertTrue(which('muscle'),
"muscle not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "muscle -version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[1].strip('v')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported muscle version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_mothur_supported_version(self):
"""mothur is in path and version is supported """
acceptable_version = (1, 25, 0)
self.assertTrue(which('mothur'),
"mothur not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
        # mothur creates a log file, so redirect it to a file in the QIIME temp dir
log_file = join(get_qiime_temp_dir(), 'mothur.log')
command = "mothur \"#set.logfile(name=%s)\" | grep '^mothur v'" % log_file
        stdout, stderr, exit_status = qiime_system_call(command)
# remove log file
remove_files([log_file], error_on_missing=False)
version_string = stdout.strip().split(' ')[1].strip('v.')
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported mothur version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_denoiser_supported_version(self):
"""denoiser aligner is ready to use """
pass_test = True
try:
check_flowgram_ali_exe()
except (ApplicationNotFoundError, ApplicationError):
pass_test = False
self.assertTrue(pass_test,
"Denoiser flowgram aligner not found or not "
"executable. This may or may not be a problem "
"depending on which components of QIIME you plan to "
"use.")
def test_raxmlHPC_supported_version(self):
"""raxmlHPC is in path and version is supported """
acceptable_version = [(7, 3, 0), (7, 3, 0)]
self.assertTrue(which('raxmlHPC'),
"raxmlHPC not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "raxmlHPC -v | grep version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[4].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported raxmlHPC version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_clearcut_supported_version(self):
"""clearcut is in path and version is supported """
acceptable_version = (1, 0, 9)
self.assertTrue(which('clearcut'),
"clearcut not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "clearcut -V"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip().split(' ')[2].strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version == acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported clearcut version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_cdhit_supported_version(self):
"""cd-hit is in path and version is supported """
self.assertTrue(which('cd-hit'),
"cd-hit not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
        # cd-hit does not provide a way to print its version
def test_rtax_supported_version(self):
"""rtax is in path and version is supported """
acceptable_version = [(0, 984)]
self.assertTrue(which('rtax'),
"rtax not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "rtax 2>&1 > %s | grep Version | awk '{print $2}'" % devnull
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported rtax version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_usearch_supported_version(self):
"""usearch is in path and version is supported """
acceptable_version = [(5, 2, 236), (5, 2, 236)]
self.assertTrue(which('usearch'),
"usearch not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "usearch --version"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.split('v')[1]
try:
version = tuple(map(int, version_string.split('.')))
pass_test = version in acceptable_version
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported usearch version. %s is required, but running %s."
% ('.'.join(map(str, acceptable_version)), version_string))
def test_R_supported_version(self):
"""R is in path and version is supported """
minimum_version = (2, 12, 0)
self.assertTrue(which('R'),
"R not found. This may or may not be a problem depending on " +
"which components of QIIME you plan to use.")
command = "R --version | grep 'R version' | awk '{print $3}'"
proc = Popen(command, shell=True, universal_newlines=True,
stdout=PIPE, stderr=STDOUT)
stdout = proc.stdout.read()
version_string = stdout.strip()
try:
version = tuple(map(int, version_string.split('.')))
pass_test = False
if version[0] == minimum_version[0]:
if version[1] == minimum_version[1]:
if version[2] >= minimum_version[2]:
pass_test = True
elif version[1] > minimum_version[1]:
pass_test = True
elif version[0] > minimum_version[0]:
pass_test = True
except ValueError:
pass_test = False
version_string = stdout
self.assertTrue(pass_test,
"Unsupported R version. %s or greater is required, but running %s."
% ('.'.join(map(str, minimum_version)), version_string))
def test_gdata_install(self):
"""gdata is installed"""
# We currently can't programmatically find the version of gdata. An
# issue has been created alerting the gdata devs.
pass_test = True
try:
import gdata
except ImportError:
pass_test = False
self.assertTrue(pass_test, "gdata is not installed.")
def test_h5py(self):
"""h5py is installed"""
self.assertTrue(h5py_lib_version != "Not installed.",
"h5py is not installed. You should install this for "
"improved performance with large BIOM files or if "
"working with BIOM format version 2.x files. For "
"more information, see "
"http://qiime.org/documentation/file_formats.html#biom-file-format-versions")
def test_qiime_config_variable(variable, qiime_config, test,
access_var=R_OK, fail_on_missing=False):
"""test if a variable is set and set to a readable path."""
fp = qiime_config[variable]
if not fp:
if fail_on_missing:
test.fail("%s not set." % variable)
else:
# non-essential file, so do not fail
return
# test if file exists
test.assertTrue(exists(fp), "%s set to an invalid file path: %s" %
(variable, fp))
modes = {R_OK: "readable",
W_OK: "writable",
X_OK: "executable"}
# test if file readable
test.assertTrue(access(fp, access_var),
"%s is not %s: %s" % (variable, modes[access_var], fp))
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if opts.haiku:
print "QIIME provides insight\nmicrobial in nature\nto ecology"
exit(0)
qiime_config = load_qiime_config()
test = opts.test
qiime_full_install = opts.qiime_full_install
rdp_jarpath = get_rdp_jarpath()
if rdp_jarpath is None:
rdp_version = "Not installed."
else:
rdp_version = split(rdp_jarpath)[1]
java_version = get_java_version()
if java_version is None:
java_version = "Not installed."
system_info = [
("Platform", platform),
("Python version", python_version.replace('\n', ' ')),
("Python executable", executable)]
max_len = max([len(e[0]) for e in system_info])
print "\nSystem information"
print "=================="
for v in system_info:
print "%*s:\t%s" % (max_len, v[0], v[1])
print "\nQIIME default reference information"
print "==================================="
print "For details on what files are used as QIIME's default references, see here:"
print " https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version
version_info = [
("QIIME library version", get_qiime_library_version()),
("QIIME script version", __version__),
("qiime-default-reference version", qdr_lib_version),
("NumPy version", numpy_lib_version),
("SciPy version", scipy_lib_version),
("pandas version", pandas_lib_version),
("matplotlib version", matplotlib_lib_version),
("biom-format version", biom_lib_version),
("h5py version", h5py_lib_version),
("qcli version", qcli_lib_version),
("pyqi version", pyqi_lib_version),
("scikit-bio version", skbio_lib_version),
("PyNAST version", pynast_lib_version),
("Emperor version", emperor_lib_version),
("burrito version", burrito_lib_version),
("burrito-fillings version", bfillings_lib_version),
("sortmerna version", sortmerna_lib_version),
("sumaclust version", sumaclust_lib_version),
("swarm version", swarm_lib_version),
("gdata", gdata_installed)
]
if qiime_full_install:
version_info += [
("RDP Classifier version (if installed)", rdp_version),
("Java version (if installed)", java_version)]
max_len = max([len(e[0]) for e in version_info])
print "\nDependency versions"
print "==================="
for v in version_info:
print "%*s:\t%s" % (max_len, v[0], v[1])
print "\nQIIME config values"
print "==================="
print "For definitions of these settings and to learn how to configure QIIME, see here:"
print " http://qiime.org/install/qiime_config.html"
print " http://qiime.org/tutorials/parallel_qiime.html\n"
max_len = max([len(key) for key in qiime_config])
for key, value in qiime_config.items():
print "%*s:\t%s" % (max_len, key, value)
if test:
if qiime_full_install:
print "\nQIIME full install test results"
print "==============================="
suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
else:
print "\nQIIME base install test results"
print "==============================="
suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
if opts.verbose:
verbosity = 2
else:
verbosity = 1
TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)
if __name__ == "__main__":
main()
|
WladimirSidorenko/SentiLex
|
refs/heads/master
|
scripts/measure_corpus_agreement.py
|
1
|
#!/usr/bin/env python2.7
# -*- coding: utf-8-unix; mode: python; -*-
"""
DESCRIPTION:
============
Script for measuring the inter-annotator agreement on MMAX corpus.
USAGE:
======
measure_corpus_agreement.py [OPTIONS] basedata_dir markables_dir1 markables_dir2
EXAMPLE:
========
(envoke from the top directory of the archive)
./scripts/measure_corpus_agreement.py --pattern='*.xml' corpus/basedata/ \
corpus/annotator-1/markables/ corpus/annotator-2/markables/
LICENSE:
========
Copyright (c) 2014-2015, ANONYMOUS
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the authors nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##################################################################
# Libraries
from merge_conll_mmax import parse_span
from collections import deque
from copy import copy, deepcopy
import argparse
import glob
import os
import re
import sys
import string
import xml.etree.ElementTree as _ET
##################################################################
# Variables and Constants
MATCH_MRKBL_IDX = 0
TOTAL_MRKBL_IDX = 1
OFFSET_IDX = 0
OVERLAP1_IDX = 0
TOTAL1_IDX = 1
OVERLAP2_IDX = 2
TOTAL2_IDX = 3
MISSING = "missing"
REDUNDANT = "redundant"
MARKABLE = "markable"
WRD_PRFX = "word_"
WRD_PRFX_RE = re.compile("word_")
WRD_SEP = ","
WRD_RANGE_SEP = ".."
NAMESPACE_PRFX = "www.eml.org/NameSpaces/"
DIFF_PRFX = "diff-"
BASEDATA_SFX = ".words.xml"
SMILEY_RE = re.compile(r"(?:ha|d|x|lol|" + '|'.join([re.escape(c) for c in string.punctuation]) + ")+$", \
re.I)
MARK_SFX_RE = re.compile("_[^_]+_level.xml$")
MRKBL_NAME_RE = re.compile(r"^.*_([^_]+)_level.xml$", re.IGNORECASE)
MRKBL_FNAME_RE = re.compile("^(.*_)([^_]+_level.xml)$", re.IGNORECASE)
MRKBL_ID_RE = re.compile(r"(?<!\S)markable_", re.IGNORECASE)
EMOEXPRESSION_NAME = "emo-expression"
POLARITY = "polarity"
POSITIVE = "positive"
NEGATIVE = "negative"
MSTAT_HEADER_FMT = "{:15s}{:>10s}{:>10s}{:>10s}{:>10s}{:>10s}"
MSTAT_FMT = "{:15s}{:>10d}{:>10d}{:>10d}{:>10d}{:>10.4f}"
XML_HEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE markables SYSTEM "markables.dtd">
"""
EMPTY_SET = set()
BINARY_OVERLAP = 1
PROPORTIONAL_OVERLAP = 2
EXACT_MATCH = 4
statistics = {}
##################################################################
# Methods
def _compute_kappa(a_overlap1, a_total1, a_overlap2, a_total2,
a_total_tkns, a_cmp):
"""Compute Cohen's Kappa.
@param a_overlap1 - number of overlapping annotations in the 1-st
annotation
@param a_total1 - total number of markables in the 1-st annotation
@param a_overlap2 - number of overlapping annotations in the 2-nd
annotation
@param a_total2 - total number of markables in the 2-nd annotation
@param a_total_tkns - total number of tokens in file
@param a_cmp - scheme used for comparison
@return float
"""
assert a_overlap1 <= a_total1, \
"The numer of matched annotation in the 1-st file" \
" exceeds the total number of markables."
assert a_overlap2 <= a_total2, \
"The numer of matched annotation in the 2-nd file" \
" exceeds the total number of markables."
assert a_overlap1 == a_overlap2 or a_cmp & BINARY_OVERLAP, \
"Different numbers of overlapping tokens for two annotators."
# compute chance agreement
if a_total_tkns == 0.0:
return 0.0
agreement = float(a_total_tkns - a_total1 + a_overlap1
- a_total2 + a_overlap2) / a_total_tkns
# chances that the first/second annotator randomly annotated a token with
# that markable
chance1 = float(a_total1) / a_total_tkns
chance2 = float(a_total2) / a_total_tkns
chance = chance1 * chance2 + (1.0 - chance1) * (1.0 - chance2)
assert chance <= 1.0, \
"Invalid value of chance agreement: '{:.2f}'".format(kappa)
# compute Cohen's Kappa
if chance < 1.0:
kappa = (agreement - chance) / (1.0 - chance)
else:
kappa = 0.0
assert kappa <= 1.0, "Invalid kappa value: '{:.4f}'".format(kappa)
return kappa
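# Worked example (hypothetical numbers, not taken from the corpus): with 100
# tokens in total, annotator 1 marking 20 tokens and annotator 2 marking 25
# tokens, 15 of which overlap, the computation above gives
#   agreement = (100 - 20 + 15 - 25 + 15) / 100 = 0.85
#   chance    = 0.2 * 0.25 + 0.8 * 0.75        = 0.65
#   kappa     = (0.85 - 0.65) / (1.0 - 0.65)   ~ 0.5714
# i.e. _compute_kappa(15, 20, 15, 25, 100, PROPORTIONAL_OVERLAP) ~ 0.5714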
def _markables2tuples(a_t):
"""
Convert markables in XML tree to tuples.
@param a_t - XML tree with elements
@return list of tuples
"""
retlist = []
    # return empty list if no XML tree was provided
if a_t is None:
return retlist
# iterate over elements of XML tree and append them as tuples
mspan = mname = None
mattrs = []
span_w_ids = []
for mark in a_t.iter():
# due to presence of namespaces, we can't directly access markable
        # elements, so we identify them heuristically by checking their attributes
mspan = mark.get("span")
if not mspan:
continue
# get id's of all words covered by the given span
span_w_ids = parse_span(mspan, a_int_fmt=True)
assert span_w_ids, "Markable span is empty"
# obtain the name of the markable
mname = mark.get("mmax_level")
# obtain and prune attributes of the markable
mattrs = mark.attrib
# # we assume those attributes are not needed as they can be found in
# # other fields of the tuples
# del mattrs["span"]
# del mattrs["mmax_level"]
# append markable as a tuple to the markable list
span_w_ids.sort()
retlist.append([span_w_ids, mname, mattrs])
# return list of markables sorted by the starting and ending positions of
# the spans
return sorted(retlist, key=lambda e: (e[0][0], e[0][-1]))
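# Shape of the returned tuples (the attribute values below are hypothetical):
# a markable element such as
#   <markable span="word_3..word_5" mmax_level="emo-expression" polarity="positive" .../>
# would appear in the returned list as
#   [[3, 4, 5], "emo-expression", {"span": "word_3..word_5",
#                                  "mmax_level": "emo-expression",
#                                  "polarity": "positive", ...}]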
def _w_id2span(a_w_ids):
"""
Convert list of word id's to string specification.
@param a_w_ids - list of word ids as integers
@return string representation of word id's
"""
ret_list = []
if not a_w_ids:
return ""
# convert list to a deque
w_deque = deque(a_w_ids)
# iterate over all word ids in deque
prev_w_id = r_start = w_deque.popleft()
w_id = -1
while w_deque:
w_id = w_deque.popleft()
        # if the next token id breaks the contiguous span, add a range from r_start
# to prev_w_id or (if no range is available) just a single token for
# r_start
if w_id - prev_w_id > 1:
            assert r_start >= 0, "Invalid range start: {:d}".format(r_start)
# append range, if previous word id is other than range start
if prev_w_id != r_start:
ret_list.append(WRD_PRFX + str(r_start) + WRD_RANGE_SEP
+ WRD_PRFX + str(prev_w_id))
else:
ret_list.append(WRD_PRFX + str(r_start))
# new range start is the current word id
r_start = w_id
prev_w_id = w_id
# append the final span
if prev_w_id != r_start:
ret_list.append(WRD_PRFX + str(r_start) + WRD_RANGE_SEP
+ WRD_PRFX + str(prev_w_id))
else:
ret_list.append(WRD_PRFX + str(r_start))
# join separate words and ranges by commas
return WRD_SEP.join(ret_list)
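# Illustration (hypothetical inputs): contiguous word ids collapse into ".."
# ranges while isolated ids stay single, e.g.
#   _w_id2span([1, 2, 3, 7]) -> "word_1..word_3,word_7"
#   _w_id2span([4])          -> "word_4"
#   _w_id2span([])           -> ""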
def _make_attrs(a_attrs, a_update_ids=True):
"""Convert a list of attribute name/value pairs to dictionary.
@param a_attrs - list of attribute name/value pairs
@param a_update_ids - boolean flag indicating whether ids should be renamed
@return dictionary of attributes
"""
retdict = dict(a_attrs)
# change markable ids if necessary
if a_update_ids:
for k, v in retdict.iteritems():
retdict[k] = MRKBL_ID_RE.sub(r"\g<0>100500", v)
return retdict
def _add_markable(a_prnt, a_tpl, **a_attrs):
"""Convert markables in XML tree to tuples.
@param a_prnt - parent XML element to which new element should be appended
@param a_tpl - tuple containing information about markable
@param a_attrs - dictionary of additional attributes
@return XML element representing the markable
"""
m_w_id, m_name, m_attrs = a_tpl
mrkbl = _ET.SubElement(a_prnt, MARKABLE, {})
# change transferred id's and update attributes of the new markable
mrkbl.attrib.update(_make_attrs(a_tpl[-1]))
# set word spans of this markable
mrkbl.attrib["span"] = _w_id2span(m_w_id)
# set attributes which were provided as markables
mrkbl.attrib.update(a_attrs)
return mrkbl
def _is_smiley(a_word):
"""Check if given word is a smiley
@param a_word - word to be checked
    @return \c true if the word is a smiley \c false otherwise
"""
return bool(SMILEY_RE.match(a_word))
def _is_emoticon(a_w_ids, a_wid2word):
"""
Check if given markable is assigned to an emoticon
@param a_w_ids - list of word id's pertaining to markable
@param a_wid2word - dictionary mapping word indices to words
@return \c true if the markable is an emoticon \c false otherwise
"""
return all([_is_smiley(a_wid2word[float(w_id)]) for w_id in a_w_ids])
def _update_stat(a_t1, a_t2, a_n, a_diff1_pos, a_diff1_neg,
a_diff2_pos, a_diff2_neg, a_wid2word,
a_cmp=BINARY_OVERLAP):
"""Compare annotations present in two XML trees.
@param a_t1 - first XML tree to compare
@param a_t2 - second XML tree to compare
@param a_n - total number of tokens
@param a_diff1_pos - list for storing agreement statistics about positive
emo-expressions in the 1-st annotation
@param a_diff1_neg - list for storing agreement statistics about negative
emo-expressions in the 1-st annotation
    @param a_diff2_pos - list for storing agreement statistics about positive
emo-expressions in the 2-nd annotation
    @param a_diff2_neg - list for storing agreement statistics about negative
emo-expressions in the 2-nd annotation
@param a_wid2word - dictionary mapping word indices to words
@param a_cmp - mode for comparing two spans
@return \c updated total number of tokens
"""
# convert markables in files to lists of tuples
markable_tuples1 = _markables2tuples(a_t1)
markable_tuples2 = _markables2tuples(a_t2)
    # generate lists of all word indices in the markables
for ipolarity in (POSITIVE, NEGATIVE):
if ipolarity == POSITIVE:
stat1 = a_diff1_pos
stat2 = a_diff2_pos
else:
stat1 = a_diff1_neg
stat2 = a_diff2_neg
m_tuples1 = [mt for mt in markable_tuples1
if mt[-1][POLARITY] == ipolarity
and not _is_emoticon(mt[OFFSET_IDX], a_wid2word)]
m_tuples2 = [mt for mt in markable_tuples2
if mt[-1][POLARITY] == ipolarity
and not _is_emoticon(mt[OFFSET_IDX], a_wid2word)]
m1_word_ids = [w for mt in m_tuples1 for w in mt[0]]
m2_word_ids = [w for mt in m_tuples2 for w in mt[0]]
# generate sets of indices in markables
m1_set = set(m1_word_ids)
m2_set = set(m2_word_ids)
if a_cmp & PROPORTIONAL_OVERLAP:
# get total number of tokens marked with that markable
stat1[TOTAL_MRKBL_IDX] = len(m1_set)
stat2[TOTAL_MRKBL_IDX] = len(m2_set)
# for proportional overlap, the number of overlapping tokens will
# be the same for both files
stat1[MATCH_MRKBL_IDX] = stat2[MATCH_MRKBL_IDX] = \
len(m1_set & m2_set)
else:
# get total number of tokens marked with that markable
if a_cmp & BINARY_OVERLAP:
stat1[TOTAL_MRKBL_IDX] = len(m1_word_ids)
stat2[TOTAL_MRKBL_IDX] = len(m2_word_ids)
# for binary overlap, we consider two spans to agree on all of
# their tokens, if they have at least one token in common
w_id_set = None
# matches1, matches2 = set(), set()
# populate set of word ids from the 1-st annotation whose spans
# are overlapping
for mt1 in m_tuples1:
w_id_set = set(mt1[OFFSET_IDX])
if w_id_set & m2_set:
# matches1.update(w_id_set)
stat1[MATCH_MRKBL_IDX] += len(w_id_set)
# populate set of word ids from the 2-nd annotation whose spans
# are overlapping
for mt2 in m_tuples2:
w_id_set = set(mt2[OFFSET_IDX])
if w_id_set & m1_set:
# matches2.update(w_id_set)
stat2[MATCH_MRKBL_IDX] += len(w_id_set)
# UNCOMMENT IF NECESSARY
# # now, join the two sets and count the number of elements in
# # them this will be
# common_matches = matches1.union(matches2)
# a_diff2[MATCH_MRKBL_IDX] = len(common_matches)
# # we also need to update the total number of markables in
# # both annotations to prevent that the number of matched
# # markables is bigger than the total number of marked tokens
# # # a_diff1[TOTAL_MRKBL_IDX] = a_diff2[TOTAL_MRKBL_IDX] =
# # len(m1_set.union(m2_set))
else:
# update counters of total words
                # for exact matches, we will simultaneously iterate over the two
# lists of markable tuples
len1, len2 = len(m_tuples1), len(m_tuples2)
stat1[TOTAL_MRKBL_IDX], stat2[TOTAL_MRKBL_IDX] = len1, len2
if len1 > len2:
max_len, min_len = len1, len2
else:
max_len, min_len = len2, len1
i = j = 0
mt1 = mt2 = mt_w1 = mt_w2 = None
while i < min_len and j < min_len:
# obtain word id's for two tuples
mt1, mt2 = m_tuples1[i], m_tuples2[j]
mt_w1, mt_w2 = mt1[OFFSET_IDX], mt2[OFFSET_IDX]
# if the 1-st tuple precedes the 2-nd, increment the 1-st
# span
if mt_w1[0] < mt_w2[0]:
# create new markable tuple for non-matching indices
i += 1
# if the 2-nd tuple precedes the 1-st, do the opposite
elif mt_w1[0] > mt_w2[0]:
# create new markable tuple for non-matching indices
j += 1
# if both spans are equal update the overlap counters
elif mt_w1 == mt_w2:
stat2[MATCH_MRKBL_IDX] += 1
i += 1
j += 1
# the number of overlapping tokens will be the same for both
# annotators
stat1[MATCH_MRKBL_IDX] = stat2[MATCH_MRKBL_IDX]
a_n -= len(m1_set | m2_set)
a_n += max(len1, len2)
return a_n
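# Worked toy example (added for clarity, not part of the original script):
# suppose annotator 1 marked the single span {3, 4, 5} and annotator 2 marked
# {4, 5, 6, 7}, both with the same polarity and neither being an emoticon.
#   * PROPORTIONAL_OVERLAP: total1 = 3, total2 = 4, and both matched counts
#     equal |{3, 4, 5} & {4, 5, 6, 7}| = 2.
#   * BINARY_OVERLAP: the spans share at least one token, so every token of
#     each span counts as matched (matched1 = 3, matched2 = 4).
#   * EXACT_MATCH: totals are the numbers of markables (1 and 1) and the
#     boundaries differ, so both matched counts stay 0.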
def compute_stat(a_basedata_dir, a_dir1, a_dir2,
a_ptrn="", a_cmp=BINARY_OVERLAP):
"""
Compare markables in two annotation directories.
@param a_basedata_dir - directory containing basedata for MMAX project
@param a_dir1 - directory containing markables for the first annotator
@param a_dir2 - directory containing markables for the second annotator
@param a_ptrn - shell pattern for markable files
@param a_cmp - mode for comparing two annotation spans
@return \c void
"""
global statistics
# find annotation files from first directory
if a_ptrn:
dir1_iterator = glob.iglob(a_dir1 + os.sep + a_ptrn)
else:
dir1_iterator = os.listdir(a_dir1)
# iterate over files from the first directory
f1 = f2 = ""
basename1 = markname = ""
basedata_fname = base_key = ""
fd1 = fd2 = basedata_fd = None
t1 = t2 = t2_root = None
f1_out = f2_out = ""
words = None
w_id2word = dict()
annotations = None
n = 0 # total number of words in the file
for f1 in dir1_iterator:
# get name of second file
basename1 = os.path.basename(f1)
print >> sys.stderr, "Processing file '{:s}'".format(f1)
f2 = a_dir2 + os.sep + basename1
# open both files for reading
fd1 = open(a_dir1 + os.sep + basename1, 'r')
try:
t1 = _ET.parse(fd1)
except (IOError, _ET.ParseError):
t1 = None
finally:
fd1.close()
        # read XML information from the second file, ignoring non-existent,
        # empty, and badly formatted files
try:
fd2 = open(f2, 'r')
try:
t2 = _ET.parse(fd2)
finally:
fd2.close()
except (IOError, _ET.ParseError):
t2 = None
if t1 is None or t2 is None:
continue
# determine the name of the markable for which we should calculate
# annotations
mname = MRKBL_NAME_RE.match(basename1).group(1).lower()
if mname.lower() != "emo-expression":
continue
mname_pos = mname + "-pos"
mname_neg = mname + "-neg"
# prepare containers for storing information about matching and
# mismatching annotations
        # the 0-th element in each list is the number of matching annotations,
        # the 1-st element is the total number of tokens annotated with that
        # markable
anno1_pos = [0, 0]
anno1_neg = [0, 0]
anno2_pos = [0, 0]
anno2_neg = [0, 0]
base_key = MARK_SFX_RE.sub("", basename1)
# obtain number of words from basedata file
basedata_fname = a_basedata_dir + os.sep + base_key + BASEDATA_SFX
basedata_fd = open(basedata_fname, "r")
# get total number of words in a file
idoc = _ET.parse(basedata_fd).getroot()
words = idoc.findall("word")
n = len(words)
for w in words:
w_id2word[float(WRD_PRFX_RE.sub("", w.get("id")))] = w.text
basedata_fd.close()
# compare two XML trees
n = _update_stat(t1, t2, n, anno1_pos, anno1_neg,
anno2_pos, anno2_neg, w_id2word, a_cmp)
# assign statistics
statistics[base_key] = {"tokens": n, "annotators":
[{mname_pos: anno1_pos, mname_neg: anno1_neg},
{mname_pos: anno2_pos, mname_neg: anno2_neg}]}
w_id2word.clear()
def print_stat(a_cmp):
"""
Output statistics about agreement measure.
@param a_cmp - scheme used for comparison
@return void
"""
markable_dic = {}
anno_dic1 = anno_dic2 = None
m_stat1 = m_stat2 = markable_stat = None
m_names = []
N = n = 0 # N - total number of tokens in all files
overlap1 = overlap2 = 0
total1 = total2 = 0
kappa = 0
# output Kappa statistics for files and simultaneously update dictionary
# for total markable statistics
for fname, fstat_dic in statistics.iteritems():
print "File: '{:s}'".format(fname)
# number of tokens in file
n = fstat_dic["tokens"]
N += n
# iterate over markables in that file
# print repr(fstat_dic["annotators"])
anno_dic1, anno_dic2 = fstat_dic["annotators"]
assert set(anno_dic1.keys()) == set(anno_dic2.keys()), \
"Unmatched number of markables for two annotators" \
" '{:s}'\nvs.\n{:s}.".format(repr(anno_dic1.keys()),
repr(anno_dic2.keys()))
# iterate over markables
for m_name, m_stat1 in anno_dic1.iteritems():
m_stat2 = anno_dic2[m_name]
overlap1, overlap2 = \
m_stat1[MATCH_MRKBL_IDX], m_stat2[MATCH_MRKBL_IDX]
total1, total2 = m_stat1[TOTAL_MRKBL_IDX], m_stat2[TOTAL_MRKBL_IDX]
# compute kappa's
kappa = _compute_kappa(overlap1, total1,
overlap2, total2, n, a_cmp)
print "Markable: {:s}".format(m_name)
print "Matched: {:d}; Total marked: {:d}; Kappa:" \
" {:.2f}".format(overlap1, total1, kappa)
# update dictionary of markables
if m_name in markable_dic:
markable_stat = markable_dic[m_name]
markable_stat[OVERLAP1_IDX] += overlap1
markable_stat[TOTAL1_IDX] += total1
markable_stat[OVERLAP2_IDX] += overlap2
markable_stat[TOTAL2_IDX] += total2
else:
markable_dic[m_name] = [overlap1, total1, overlap2, total2]
print "=================================" \
"================================="
# output statistics for markables
print "STATISTICS FOR MARKABLES"
print MSTAT_HEADER_FMT.format("Markable", "Overlap1", "Total1",
"Overlap2", "Total2", "Kappa")
for m_name, m_stat in markable_dic.iteritems():
kappa = _compute_kappa(m_stat[OVERLAP1_IDX], m_stat[TOTAL1_IDX],
m_stat[OVERLAP2_IDX], m_stat[TOTAL2_IDX],
N, a_cmp)
print MSTAT_FMT.format(m_name, m_stat[OVERLAP1_IDX],
m_stat[TOTAL1_IDX], m_stat[OVERLAP2_IDX],
m_stat[TOTAL2_IDX], kappa)
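# Background note (a hedged sketch; the actual computation lives in
# _compute_kappa, defined earlier in this script, and may differ in detail):
# chance-corrected agreement coefficients of this kind are generally computed
# as
#     kappa = (A_o - A_e) / (1 - A_e)
# where A_o is the observed agreement (e.g. matched tokens relative to the
# marked tokens) and A_e is the agreement expected by chance, derived from
# the proportion of the n tokens that each annotator marked.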
def main():
"""
Main method for measuring agreement and marking differences in corpus.
"""
# process arguments
argparser = argparse.ArgumentParser(description="Script for measuring"
" agreement between two annotated"
" MMAX projects and marking difference"
" between them.")
argparser.add_argument("basedata_dir",
help="directory containing basedata (tokens)"
" for MMAX project")
argparser.add_argument("directory1",
help="directory containing markables from"
" the first annotator")
argparser.add_argument("directory2",
help="directory containing markables"
" from the second annotator")
# agreement schemes for spans
argparser.add_argument("-b", "--binary-overlap",
                           help="consider two spans to agree on all tokens"
                           " of their respective spans if they overlap by"
                           " at least one token (default comparison mode)",
action="store_const", const=BINARY_OVERLAP,
default=0)
argparser.add_argument("-p", "--proportional-overlap",
                           help="count as agreement only tokens that actually"
                           " overlap in the two spans", action="store_const",
const=PROPORTIONAL_OVERLAP, default=0)
argparser.add_argument("-x", "--exact-match",
help="consider two spans to agree if they have"
" exactly the same boundaries",
action="store_const",
const=EXACT_MATCH, default=0)
# additional flags
argparser.add_argument("--pattern",
help="shell pattern for files with markables",
type=str, default="*emo-expression*")
args = argparser.parse_args()
# check if comparison scheme is specified
cmp_scheme = args.binary_overlap | args.proportional_overlap \
| args.exact_match
if cmp_scheme == 0:
cmp_scheme = BINARY_OVERLAP
# check existence and readability of directory
dir1 = args.directory1
dir2 = args.directory2
    assert os.path.isdir(dir1) and os.access(dir1, os.X_OK), \
        "Directory '{:s}' does not exist or cannot be accessed.".format(dir1)
    assert os.path.isdir(dir2) and os.access(dir2, os.X_OK), \
        "Directory '{:s}' does not exist or cannot be accessed.".format(dir2)
# compare the directory contents and edit files if necessary
compute_stat(args.basedata_dir, dir1, dir2,
a_ptrn=args.pattern, a_cmp=cmp_scheme)
print_stat(cmp_scheme)
##################################################################
# Main
if __name__ == "__main__":
main()
|
mF2C/COMPSs
|
refs/heads/master
|
compss/programming_model/bindings/python/src/pycompss/api/dummy/__init__.py
|
12
|
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
|
hyperized/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_inventory_source.py
|
38
|
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright: (c) 2018, Adrien Fleury <fleu42@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: tower_inventory_source
author: "Adrien Fleury (@fleu42)"
version_added: "2.7"
short_description: create, update, or destroy Ansible Tower inventory source.
description:
    - Create, update, or destroy Ansible Tower inventory sources. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the inventory source.
required: True
description:
description:
- The description to use for the inventory source.
inventory:
description:
- The inventory the source is linked to.
required: True
source:
description:
- Types of inventory source.
choices:
- file
- scm
- ec2
- gce
- azure
- azure_rm
- vmware
- satellite6
- cloudforms
- openstack
- rhv
- tower
- custom
required: True
credential:
description:
- Credential to use to retrieve the inventory from.
source_vars:
description:
- >-
        The source_vars allow you to override variables found in the source config
file. For example with Openstack, specifying *private: false* would
change the output of the openstack.py script. It has to be YAML or
JSON.
timeout:
description:
- Number in seconds after which the Tower API methods will time out.
source_project:
description:
- Use a *project* as a source for the *inventory*.
source_path:
description:
- Path to the file to use as a source in the selected *project*.
update_on_project_update:
description:
- >-
        This parameter syncs the inventory when the project is synced. It
        can only be used with an SCM source.
type: bool
source_regions:
description:
- >-
        List of regions for your cloud provider. You can include multiple
        regions, or all regions. Only hosts associated with the selected
        regions will be updated. Refer to Ansible Tower documentation for
        more detail.
instance_filters:
description:
- >-
Provide a comma-separated list of filter expressions. Hosts are
imported when all of the filters match. Refer to Ansible Tower
documentation for more detail.
group_by:
description:
- >-
Specify which groups to create automatically. Group names will be
created similar to the options selected. If blank, all groups above
are created. Refer to Ansible Tower documentation for more detail.
source_script:
description:
- >-
The source custom script to use to build the inventory. It needs to
exist.
overwrite:
description:
- >-
If set, any hosts and groups that were previously present on the
external source but are now removed will be removed from the Tower
inventory. Hosts and groups that were not managed by the inventory
source will be promoted to the next manually created group or if
there is no manually created group to promote them into, they will be
left in the "all" default group for the inventory. When not checked,
local child hosts and groups not found on the external source will
remain untouched by the inventory update process.
type: bool
overwrite_vars:
description:
- >-
If set, all variables for child groups and hosts will be removed
and replaced by those found on the external source. When not checked,
a merge will be performed, combining local variables with those found
on the external source.
type: bool
update_on_launch:
description:
- >-
Each time a job runs using this inventory, refresh the inventory from
the selected source before executing job tasks.
type: bool
update_cache_timeout:
description:
- >-
Time in seconds to consider an inventory sync to be current. During
job runs and callbacks the task system will evaluate the timestamp of
the latest sync. If it is older than Cache Timeout, it is not
considered current, and a new inventory sync will be performed.
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
validate_certs:
description:
- Tower option to avoid certificates check.
type: bool
aliases: [ tower_verify_ssl ]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Add tower inventory source
tower_inventory_source:
name: Inventory source
description: My Inventory source
inventory: My inventory
credential: Devstack_credential
source: openstack
update_on_launch: true
overwrite: true
source_vars: '{ private: false }'
state: present
validate_certs: false
'''
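# A second, hypothetical usage sketch (kept outside the module's official
# EXAMPLES above; all parameter values are illustrative only), showing an
# SCM-backed inventory source built from the documented options:
#
#   - name: Add SCM-backed tower inventory source
#     tower_inventory_source:
#       name: Project inventory source
#       inventory: My inventory
#       source: scm
#       source_project: My project
#       source_path: inventories/hosts.ini
#       update_on_project_update: true
#       overwrite: true
#       state: present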
RETURN = ''' # '''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
SOURCE_CHOICES = {
'file': 'Directory or Script',
'scm': 'Sourced from a Project',
'ec2': 'Amazon EC2',
'gce': 'Google Compute Engine',
'azure': 'Microsoft Azure',
'azure_rm': 'Microsoft Azure Resource Manager',
'vmware': 'VMware vCenter',
'satellite6': 'Red Hat Satellite 6',
'cloudforms': 'Red Hat CloudForms',
'openstack': 'OpenStack',
'rhv': 'Red Hat Virtualization',
'tower': 'Ansible Tower',
'custom': 'Custom Script',
}
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(required=False),
inventory=dict(required=True),
source=dict(required=True,
choices=SOURCE_CHOICES.keys()),
credential=dict(required=False),
source_vars=dict(required=False),
timeout=dict(type='int', required=False),
source_project=dict(required=False),
source_path=dict(required=False),
update_on_project_update=dict(type='bool', required=False),
source_regions=dict(required=False),
instance_filters=dict(required=False),
group_by=dict(required=False),
source_script=dict(required=False),
overwrite=dict(type='bool', required=False),
overwrite_vars=dict(type='bool', required=False),
update_on_launch=dict(type='bool', required=False),
update_cache_timeout=dict(type='int', required=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
inventory = module.params.get('inventory')
source = module.params.get('source')
state = module.params.get('state')
json_output = {'inventory_source': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
inventory_source = tower_cli.get_resource('inventory_source')
try:
params = {}
params['name'] = name
params['source'] = source
if module.params.get('description'):
params['description'] = module.params.get('description')
if module.params.get('credential'):
credential_res = tower_cli.get_resource('credential')
try:
credential = credential_res.get(
name=module.params.get('credential'))
params['credential'] = credential['id']
except (exc.NotFound) as excinfo:
module.fail_json(
                        msg='Failed to update credential source, '
'credential not found: {0}'.format(excinfo),
changed=False
)
if module.params.get('source_project'):
source_project_res = tower_cli.get_resource('project')
try:
source_project = source_project_res.get(
name=module.params.get('source_project'))
params['source_project'] = source_project['id']
except (exc.NotFound) as excinfo:
module.fail_json(
                        msg='Failed to update source project, '
'project not found: {0}'.format(excinfo),
changed=False
)
if module.params.get('source_script'):
source_script_res = tower_cli.get_resource('inventory_script')
try:
script = source_script_res.get(
name=module.params.get('source_script'))
params['source_script'] = script['id']
except (exc.NotFound) as excinfo:
module.fail_json(
                        msg='Failed to update source script, '
'script not found: {0}'.format(excinfo),
changed=False
)
try:
inventory_res = tower_cli.get_resource('inventory')
params['inventory'] = inventory_res.get(name=inventory)['id']
except (exc.NotFound) as excinfo:
module.fail_json(
msg='Failed to update inventory source, '
'inventory not found: {0}'.format(excinfo),
changed=False
)
for key in ('source_vars', 'timeout', 'source_path',
'update_on_project_update', 'source_regions',
'instance_filters', 'group_by', 'overwrite',
'overwrite_vars', 'update_on_launch',
'update_cache_timeout'):
if module.params.get(key) is not None:
params[key] = module.params.get(key)
if state == 'present':
params['create_on_missing'] = True
result = inventory_source.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
params['fail_on_missing'] = False
result = inventory_source.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
            module.fail_json(msg='Failed to update inventory source: '
                                 '{0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
goulu/networkx
|
refs/heads/master
|
networkx/readwrite/tests/test_pajek.py
|
6
|
#!/usr/bin/env python
"""
Pajek tests
"""
from nose.tools import assert_equal
from networkx import *
import os,tempfile
from io import open
from networkx.testing import *
class TestPajek(object):
def setUp(self):
self.data="""*network Tralala\n*vertices 4\n 1 "A1" 0.0938 0.0896 ellipse x_fact 1 y_fact 1\n 2 "Bb" 0.8188 0.2458 ellipse x_fact 1 y_fact 1\n 3 "C" 0.3688 0.7792 ellipse x_fact 1\n 4 "D2" 0.9583 0.8563 ellipse x_fact 1\n*arcs\n1 1 1 h2 0 w 3 c Blue s 3 a1 -130 k1 0.6 a2 -130 k2 0.6 ap 0.5 l "Bezier loop" lc BlueViolet fos 20 lr 58 lp 0.3 la 360\n2 1 1 h2 0 a1 120 k1 1.3 a2 -120 k2 0.3 ap 25 l "Bezier arc" lphi 270 la 180 lr 19 lp 0.5\n1 2 1 h2 0 a1 40 k1 2.8 a2 30 k2 0.8 ap 25 l "Bezier arc" lphi 90 la 0 lp 0.65\n4 2 -1 h2 0 w 1 k1 -2 k2 250 ap 25 l "Circular arc" c Red lc OrangeRed\n3 4 1 p Dashed h2 0 w 2 c OliveGreen ap 25 l "Straight arc" lc PineGreen\n1 3 1 p Dashed h2 0 w 5 k1 -1 k2 -20 ap 25 l "Oval arc" c Brown lc Black\n3 3 -1 h1 6 w 1 h2 12 k1 -2 k2 -15 ap 0.5 l "Circular loop" c Red lc OrangeRed lphi 270 la 180"""
self.G=nx.MultiDiGraph()
self.G.add_nodes_from(['A1', 'Bb', 'C', 'D2'])
self.G.add_edges_from([('A1', 'A1'), ('A1', 'Bb'), ('A1', 'C'),
('Bb', 'A1'),('C', 'C'), ('C', 'D2'),
('D2', 'Bb')])
self.G.graph['name']='Tralala'
(fd,self.fname)=tempfile.mkstemp()
with os.fdopen(fd, 'wb') as fh:
fh.write(self.data.encode('UTF-8'))
def tearDown(self):
os.unlink(self.fname)
def test_parse_pajek_simple(self):
# Example without node positions or shape
data="""*Vertices 2\n1 "1"\n2 "2"\n*Edges\n1 2\n2 1"""
G=parse_pajek(data)
assert_equal(sorted(G.nodes()), ['1', '2'])
assert_edges_equal(G.edges(), [('1', '2'), ('1', '2')])
def test_parse_pajek(self):
G=parse_pajek(self.data)
assert_equal(sorted(G.nodes()), ['A1', 'Bb', 'C', 'D2'])
assert_edges_equal(G.edges(), [('A1', 'A1'), ('A1', 'Bb'),
('A1', 'C'), ('Bb', 'A1'),
('C', 'C'), ('C', 'D2'), ('D2', 'Bb')])
    def test_parse_pajek_mat(self):
data = """*Vertices 3\n1 "one"\n2 "two"\n3 "three"\n*Matrix\n1 1 0\n0 1 0\n0 1 0\n"""
G=parse_pajek(data)
assert_equal(set(G.nodes()), {'one', 'two', 'three'})
assert_equal(G.node['two'], {'id': '2'})
assert_edges_equal(set(G.edges()), {('one', 'one'), ('two', 'one'), ('two', 'two'), ('two', 'three')})
def test_read_pajek(self):
G=parse_pajek(self.data)
Gin=read_pajek(self.fname)
assert_equal(sorted(G.nodes()), sorted(Gin.nodes()))
assert_edges_equal(G.edges(), Gin.edges())
assert_equal(self.G.graph,Gin.graph)
for n in G.node:
assert_equal(G.node[n],Gin.node[n])
def test_noname(self):
# Make sure we can parse a line such as: *network
# Issue #952
line = "*network\n"
other_lines = self.data.split('\n')[1:]
data = line + '\n'.join(other_lines)
G = parse_pajek(data)
def test_unicode(self):
import io
G = nx.Graph()
try: # Python 3.x
name1 = chr(2344) + chr(123) + chr(6543)
name2 = chr(5543) + chr(1543) + chr(324)
except ValueError: # Python 2.6+
name1 = unichr(2344) + unichr(123) + unichr(6543)
name2 = unichr(5543) + unichr(1543) + unichr(324)
G.add_edge(name1, 'Radiohead', foo=name2)
fh = io.BytesIO()
nx.write_pajek(G,fh)
fh.seek(0)
H=nx.read_pajek(fh)
assert_nodes_equal(list(G), list(H))
assert_edges_equal(list(G.edges()), list(H.edges()))
assert_equal(G.graph, H.graph)
|
eciis/web
|
refs/heads/dev
|
backend/test/model_test/invite_user_adm_test.py
|
1
|
# -*- coding: utf-8 -*-
"""Invite User Adm Test."""
from .. import mocks
from ..test_base import TestBase
from models import Institution
from models import InviteUserAdm
from models import User
from custom_exceptions import NotAuthorizedException
class InviteUserAdmTest(TestBase):
"""Test invite user adm model."""
@classmethod
def setUp(cls):
"""Provide the base for the tests."""
cls.test = cls.testbed.Testbed()
cls.test.activate()
cls.policy = cls.datastore.PseudoRandomHRConsistencyPolicy(
probability=1)
cls.test.init_datastore_v3_stub(consistency_policy=cls.policy)
cls.test.init_memcache_stub()
def test_create(self):
"""Test create new invite."""
institution = mocks.create_institution()
admin = mocks.create_user()
new_admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
institution.add_member(new_admin)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": new_admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": admin.key.urlsafe(),
"is_request": False,
"sender_key": admin.key.urlsafe(),
"sender_name": admin.name,
"invitee_key": new_admin.key.urlsafe()
}
created_invite = InviteUserAdm.create(data)
expected_invite = InviteUserAdm()
expected_invite.invitee = new_admin.email[0]
expected_invite.admin_key = admin.key
expected_invite.is_request = False
expected_invite.institution_key = institution.key
expected_invite.sender_key = admin.key
expected_invite.sender_name = admin.name
expected_invite.invitee_key = new_admin.key
self.assertEquals(
created_invite,
expected_invite,
"The created invite should be equal to the expected one"
)
    def test_create_invite_with_invitee_not_a_member(self):
        """Test create invite with invitee not a member."""
institution = mocks.create_institution()
admin = mocks.create_user()
new_admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": new_admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": admin.key.urlsafe(),
"is_request": False,
"sender_key": admin.key.urlsafe(),
"sender_name": admin.name,
"invitee_key": new_admin.key.urlsafe()
}
with self.assertRaises(NotAuthorizedException) as raises_context:
InviteUserAdm.create(data)
        message_exception = str(raises_context.exception)
        self.assertEqual(
            message_exception,
'The invitee is not a member of this institution!',
'Expected message of exception must be equal to The invitee is not a member of this institution!'
)
def test_create_more_than_one_invitation(self):
"""Test create more than one invitation."""
institution = mocks.create_institution()
admin = mocks.create_user()
new_admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
institution.add_member(new_admin)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": new_admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": admin.key.urlsafe(),
"is_request": False,
"sender_key": admin.key.urlsafe(),
"sender_name": admin.name,
"invitee_key": new_admin.key.urlsafe()
}
created_invite = InviteUserAdm.create(data)
created_invite.put()
with self.assertRaises(NotAuthorizedException) as raises_context:
InviteUserAdm.create(data)
        message_exception = str(raises_context.exception)
        self.assertEqual(
            message_exception,
'An invitation is already being processed for this institution!',
'Expected message of exception must be equal to An invitation is already being processed for this institution!'
)
def test_create_user_not_admin(self):
"""Test create with user not admin."""
institution = mocks.create_institution()
admin = mocks.create_user()
new_admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
institution.add_member(new_admin)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": new_admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": new_admin.key.urlsafe(),
"is_request": False,
"sender_key": new_admin.key.urlsafe(),
"sender_name": new_admin.name,
"invitee_key": new_admin.key.urlsafe()
}
with self.assertRaises(NotAuthorizedException) as raises_context:
InviteUserAdm.create(data)
        message_exception = str(raises_context.exception)
        self.assertEqual(
            message_exception,
'Sender is not admin of this institution!',
'Expected message of exception must be equal to Sender is not admin of this institution!'
)
def test_create_user_already_admin(self):
"""Test create with user already admin."""
institution = mocks.create_institution()
admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": admin.key.urlsafe(),
"is_request": False,
"sender_key": admin.key.urlsafe(),
"sender_name": admin.name,
"invitee_key": admin.key.urlsafe()
}
with self.assertRaises(NotAuthorizedException) as raises_context:
InviteUserAdm.create(data)
        message_exception = str(raises_context.exception)
        self.assertEqual(
            message_exception,
'The invitee is already admin of this institution!',
'Expected message of exception must be equal to The invitee is already admin of this institution!'
)
def test_make(self):
"""Test make invite."""
institution = mocks.create_institution()
admin = mocks.create_user()
new_admin = mocks.create_user()
institution.add_member(admin)
admin.institutions.append(institution.key)
institution.set_admin(admin.key)
institution.add_member(new_admin)
admin.add_institution_admin(institution.key)
institution.put()
admin.put()
data = {
"invitee": new_admin.email[0],
"institution_key": institution.key.urlsafe(),
"admin_key": admin.key.urlsafe(),
"is_request": False,
"sender_key": admin.key.urlsafe(),
"sender_name": admin.name,
"invitee_key": new_admin.key.urlsafe()
}
created_invite = InviteUserAdm.create(data)
created_invite.put()
        made_invite = created_invite.make()
        expected_made_invite = {
"invitee": new_admin.email[0],
"sender_name": admin.name,
"admin_name": admin.name,
"key": created_invite.key.urlsafe(),
"status": created_invite.status,
"institution_admin": {"name": institution.name},
"institution": institution.make(InviteUserAdm.INST_PROPS_TO_MAKE),
"institution_key": institution.key.urlsafe(),
"invitee_key": new_admin.key.urlsafe(),
"invitee_name": new_admin.name,
"type_of_invite": "INVITE_USER_ADM"
}
self.assertEquals(
            made_invite,
            expected_made_invite,
            "The made invite should be equal to the expected one"
)
|
willu47/smif
|
refs/heads/develop
|
src/smif/data_layer/memory_interface.py
|
1
|
"""Memory-backed store implementations
"""
from collections import OrderedDict
from copy import copy, deepcopy
from smif.data_layer.abstract_config_store import ConfigStore
from smif.data_layer.abstract_data_store import DataStore
from smif.data_layer.abstract_metadata_store import MetadataStore
from smif.data_layer.data_array import DataArray
from smif.exception import (SmifDataExistsError, SmifDataMismatchError,
SmifDataNotFoundError)
class MemoryConfigStore(ConfigStore):
"""Config store in memory
"""
def __init__(self):
super().__init__()
self._model_runs = OrderedDict()
self._sos_models = OrderedDict()
self._models = OrderedDict()
self._scenarios = OrderedDict()
self._narratives = OrderedDict()
self._strategies = OrderedDict()
# region Model runs
def read_model_runs(self):
return list(self._model_runs.values())
def read_model_run(self, model_run_name):
try:
return self._model_runs[model_run_name]
except KeyError:
            raise SmifDataNotFoundError("model_run '%s' not found" % (model_run_name))
def write_model_run(self, model_run):
if model_run['name'] not in self._model_runs:
self._model_runs[model_run['name']] = model_run
else:
raise SmifDataExistsError("model_run '%s' already exists" % (model_run['name']))
def update_model_run(self, model_run_name, model_run):
if model_run_name in self._model_runs:
self._model_runs[model_run_name] = model_run
else:
raise SmifDataNotFoundError("model_run '%s' not found" % (model_run_name))
def delete_model_run(self, model_run_name):
try:
del self._model_runs[model_run_name]
except KeyError:
raise SmifDataNotFoundError("model_run '%s' not found" % (model_run_name))
# endregion
# region System-of-systems models
def read_sos_models(self):
return list(self._sos_models.values())
def read_sos_model(self, sos_model_name):
try:
return self._sos_models[sos_model_name]
except KeyError:
raise SmifDataNotFoundError("sos_model '%s' not found" % (sos_model_name))
def write_sos_model(self, sos_model):
if sos_model['name'] not in self._sos_models:
self._sos_models[sos_model['name']] = sos_model
else:
raise SmifDataExistsError("sos_model '%s' already exists" % (sos_model['name']))
def update_sos_model(self, sos_model_name, sos_model):
if sos_model_name in self._sos_models:
self._sos_models[sos_model_name] = sos_model
else:
raise SmifDataNotFoundError("sos_model '%s' not found" % (sos_model_name))
def delete_sos_model(self, sos_model_name):
try:
del self._sos_models[sos_model_name]
except KeyError:
raise SmifDataNotFoundError("sos_model '%s' not found" % (sos_model_name))
# endregion
# region Models
def read_models(self):
return list(self._models.values())
def read_model(self, model_name):
try:
return self._models[model_name]
except KeyError:
raise SmifDataNotFoundError("model '%s' not found" % (model_name))
def write_model(self, model):
if model['name'] not in self._models:
model = _skip_coords(model, ('inputs', 'outputs', 'parameters'))
self._models[model['name']] = model
else:
raise SmifDataExistsError("model '%s' already exists" % (model['name']))
def update_model(self, model_name, model):
if model_name in self._models:
model = _skip_coords(model, ('inputs', 'outputs', 'parameters'))
self._models[model_name] = model
else:
raise SmifDataNotFoundError("model '%s' not found" % (model_name))
def delete_model(self, model_name):
try:
del self._models[model_name]
except KeyError:
raise SmifDataNotFoundError("model '%s' not found" % (model_name))
# endregion
# region Scenarios
def read_scenarios(self):
scenarios = self._scenarios.values()
return [_variant_dict_to_list(s) for s in scenarios]
def read_scenario(self, scenario_name):
try:
scenario = self._scenarios[scenario_name]
return _variant_dict_to_list(scenario)
except KeyError:
raise SmifDataNotFoundError("scenario '%s' not found" % (scenario_name))
def write_scenario(self, scenario):
if scenario['name'] not in self._scenarios:
scenario = _variant_list_to_dict(scenario)
scenario = _skip_coords(scenario, ['provides'])
self._scenarios[scenario['name']] = scenario
else:
raise SmifDataExistsError("scenario '%s' already exists" % (scenario['name']))
def update_scenario(self, scenario_name, scenario):
if scenario_name in self._scenarios:
scenario = _variant_list_to_dict(scenario)
scenario = _skip_coords(scenario, ['provides'])
self._scenarios[scenario_name] = scenario
else:
raise SmifDataNotFoundError("scenario '%s' not found" % (scenario_name))
def delete_scenario(self, scenario_name):
try:
del self._scenarios[scenario_name]
except KeyError:
raise SmifDataNotFoundError("scenario '%s' not found" % (scenario_name))
# endregion
# region Scenario Variants
def read_scenario_variants(self, scenario_name):
return list(self._scenarios[scenario_name]['variants'].values())
def read_scenario_variant(self, scenario_name, variant_name):
try:
return self._scenarios[scenario_name]['variants'][variant_name]
except KeyError:
raise SmifDataNotFoundError("scenario '%s' variant '%s' not found"
% (scenario_name, variant_name))
def write_scenario_variant(self, scenario_name, variant):
self._scenarios[scenario_name]['variants'][variant['name']] = variant
def update_scenario_variant(self, scenario_name, variant_name, variant):
self._scenarios[scenario_name]['variants'][variant_name] = variant
def delete_scenario_variant(self, scenario_name, variant_name):
del self._scenarios[scenario_name]['variants'][variant_name]
# endregion
# region Narratives
def _read_narratives(self, sos_model_name):
return self._sos_models[sos_model_name]['narratives']
def read_narrative(self, sos_model_name, narrative_name):
try:
narrative = [x for x in self._read_narratives(sos_model_name)
if x['name'] == narrative_name][0]
except IndexError:
msg = "Narrative '{}' not found in '{}'"
raise SmifDataNotFoundError(msg.format(narrative_name, sos_model_name))
return narrative
def _read_narrative_variant(self, sos_model_name, narrative_name, variant_name):
narrative = self.read_narrative(sos_model_name, narrative_name)
try:
variant = [x for x in narrative['variants'] if x['name'] == variant_name][0]
except IndexError:
msg = "Variant '{}' not found in '{}'"
raise SmifDataNotFoundError(msg.format(variant_name, narrative_name))
return variant
# endregion
# region Strategies
def read_strategies(self, modelrun_name):
try:
return self._strategies[modelrun_name]
except KeyError:
raise SmifDataNotFoundError("strategies in modelrun '%s' not found"
% (modelrun_name))
def write_strategies(self, modelrun_name, strategies):
self._strategies[modelrun_name] = strategies
# endregion
class MemoryMetadataStore(MetadataStore):
"""Store metadata in-memory
"""
def __init__(self):
super().__init__()
self._units = [] # list[str] of pint definitions
self._dimensions = OrderedDict()
# region Units
def write_unit_definitions(self, units):
self._units = units
def read_unit_definitions(self):
return self._units
# endregion
# region Dimensions
def read_dimensions(self, skip_coords=False):
return [self.read_dimension(k, skip_coords) for k in self._dimensions]
def read_dimension(self, dimension_name, skip_coords=False):
dim = self._dimensions[dimension_name]
if skip_coords:
dim = {
'name': dim['name'],
'description': dim['description']
}
return dim
def write_dimension(self, dimension):
self._dimensions[dimension['name']] = dimension
def update_dimension(self, dimension_name, dimension):
self._dimensions[dimension['name']] = dimension
def delete_dimension(self, dimension_name):
del self._dimensions[dimension_name]
# endregion
class MemoryDataStore(DataStore):
"""Store data in-memory
"""
def __init__(self):
super().__init__()
self._data_array = OrderedDict()
self._interventions = OrderedDict()
self._initial_conditions = OrderedDict()
self._state = OrderedDict()
self._model_parameter_defaults = OrderedDict()
self._coefficients = OrderedDict()
self._results = OrderedDict()
# region Data Array
def read_scenario_variant_data(self, key, spec, timestep=None):
return self._read_data_array(key, spec, timestep)
def write_scenario_variant_data(self, key, data, timestep=None):
self._write_data_array(key, data, timestep)
def read_narrative_variant_data(self, key, spec, timestep=None):
return self._read_data_array(key, spec, timestep)
def write_narrative_variant_data(self, key, data, timestep=None):
self._write_data_array(key, data, timestep)
def _read_data_array(self, key, spec, timestep=None):
if timestep:
try:
data = self._data_array[key, timestep]
except KeyError:
try:
data = self._filter_timestep(self._data_array[key], spec, timestep)
except KeyError:
raise SmifDataNotFoundError(
"Data for {} not found for timestep {}".format(spec.name, timestep))
else:
try:
data = self._data_array[key]
except KeyError:
raise SmifDataNotFoundError(
"Data for {} not found".format(spec.name))
if data.spec != spec:
raise SmifDataMismatchError(
"Spec did not match reading {}, requested {}, got {}".format(
spec.name, spec, data.spec))
return data
def _filter_timestep(self, data, read_spec, timestep):
dataframe = data.as_df().reset_index()
if 'timestep' not in dataframe.columns:
msg = "Missing 'timestep' key, found {} in {}"
raise SmifDataMismatchError(msg.format(list(dataframe.columns), data.name))
dataframe = dataframe[dataframe.timestep == timestep]
if dataframe.empty:
raise SmifDataNotFoundError(
"Data for {} not found for timestep {}".format(data.name, timestep))
dataframe.drop('timestep', axis=1, inplace=True)
return DataArray.from_df(read_spec, dataframe)
def _write_data_array(self, key, data, timestep=None):
if timestep:
self._data_array[key, timestep] = data
else:
self._data_array[key] = data
# endregion
# region Model parameters
def read_model_parameter_default(self, key, spec):
data = self._model_parameter_defaults[key]
if data.spec != spec:
raise SmifDataMismatchError(
"Spec did not match reading {}, requested {}, got {}".format(
spec.name, spec, data.spec))
return data
def write_model_parameter_default(self, key, data):
self._model_parameter_defaults[key] = data
# endregion
# region Interventions
def read_interventions(self, keys):
all_interventions = {}
interventions = [list(self._interventions[key].values()) for key in keys][0]
for entry in interventions:
name = entry.pop('name')
if name in all_interventions:
msg = "An entry for intervention {} already exists"
raise ValueError(msg.format(name))
else:
all_interventions[name] = entry
return all_interventions
def write_interventions(self, key, interventions):
self._interventions[key] = interventions
def read_strategy_interventions(self, strategy):
return strategy['interventions']
def read_initial_conditions(self, keys):
return [self._initial_conditions[key] for key in keys][0]
def write_initial_conditions(self, key, initial_conditions):
self._initial_conditions[key] = initial_conditions
# endregion
# region State
def read_state(self, modelrun_name, timestep=None, decision_iteration=None):
return self._state[(modelrun_name, timestep, decision_iteration)]
def write_state(self, state, modelrun_name, timestep=None, decision_iteration=None):
self._state[(modelrun_name, timestep, decision_iteration)] = state
# endregion
# region Conversion coefficients
def read_coefficients(self, source_dim, destination_dim):
try:
return self._coefficients[(source_dim, destination_dim)]
except KeyError:
msg = "Could not find coefficients for conversion from {}>{}"
raise SmifDataNotFoundError(msg.format(source_dim, destination_dim))
def write_coefficients(self, source_dim, destination_dim, data):
self._coefficients[(source_dim, destination_dim)] = data
# endregion
# region Results
def read_results(self, modelrun_name, model_name, output_spec, timestep=None,
decision_iteration=None):
key = (modelrun_name, model_name, output_spec.name, timestep, decision_iteration)
try:
results = self._results[key]
except KeyError:
raise SmifDataNotFoundError("Cannot find results for {}".format(key))
return DataArray(output_spec, results)
def write_results(self, data_array, modelrun_name, model_name, timestep=None,
decision_iteration=None):
key = (modelrun_name, model_name, data_array.spec.name, timestep, decision_iteration)
self._results[key] = data_array.as_ndarray()
def available_results(self, model_run_name):
results_keys = [
(timestep, decision_iteration, model_name, output_name)
for (result_modelrun_name, model_name, output_name, timestep, decision_iteration)
in self._results.keys()
if model_run_name == result_modelrun_name
]
return results_keys
# endregion
def _variant_list_to_dict(config):
config = copy(config)
try:
list_ = config['variants']
except KeyError:
list_ = []
config['variants'] = {variant['name']: variant for variant in list_}
return config
def _variant_dict_to_list(config):
config = copy(config)
try:
dict_ = config['variants']
except KeyError:
dict_ = {}
config['variants'] = list(dict_.values())
return config
def _skip_coords(config, keys):
"""Given a config dict and list of top-level keys for lists of specs,
delete coords from each spec in each list.
"""
config = deepcopy(config)
for key in keys:
for spec in config[key]:
try:
del spec['coords']
except KeyError:
pass
return config
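# Minimal illustration of the helper above (the sample config is hypothetical,
# not taken from the test suite): given
#   config = {'name': 'm',
#             'inputs': [{'name': 'x', 'coords': [1, 2]}],
#             'outputs': [], 'parameters': []}
# _skip_coords(config, ('inputs', 'outputs', 'parameters')) returns a deep
# copy whose 'inputs' spec no longer carries the 'coords' key, leaving the
# original dict untouched.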
|
benoitsteiner/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/python/kernel_tests/pooling_ops_test.py
|
19
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for pooling operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def GetTestConfigs(include_nchw_vect_c=False):
"""Get all the valid tests configs to run.
Args:
include_nchw_vect_c: Whether to include NCHW_VECT_C in the test configs.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if not test.is_gpu_available(cuda_only=True):
tf_logging.info("NCHW and NCHW_VECT_C tests skipped because not run with "
"--config=cuda or no GPUs available.")
return test_configs
# "NCHW" format is currently supported exclusively on CUDA GPUs.
test_configs += [("NCHW", True)]
if include_nchw_vect_c:
if test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
test_configs += [("NCHW_VECT_C", True)]
else:
tf_logging.info("NCHW_VECT_C test skipped because no GPUs with "
"compute capability >= 6.1 are available.")
return test_configs
def GetShrunkInceptionMaxPoolShapes(shrink=30):
"""Iterator for some of the max pool ops in the Inception 2015 model.
Args:
shrink: Factor to shrink depth relative to Inception.
Yields:
Tuple (name, input_size, filter_size, out_size, strides, padding)
"""
names = ["maxpool2", "maxpool3", "maxpool4", "maxpool5"]
input_sizes = [[32, 71, 71, 192], [32, 35, 35, 288], [32, 17, 17, 1248],
[32, 8, 8, 2048]]
filter_sizes = [[1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1], [1, 3, 3, 1]]
output_sizes = [[32, 35, 35, 192], [32, 17, 17, 288], [32, 8, 8, 1248],
[32, 8, 8, 2048]]
strides = [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1], [1, 1, 1, 1]]
# Shrink each depth value
for i in input_sizes:
i[3] //= shrink
for o in output_sizes:
o[3] //= shrink
paddings = ["VALID", "VALID", "VALID", "SAME"]
for n, i, f, o, s, p in zip(names, input_sizes, filter_sizes, output_sizes,
strides, paddings):
yield n, i, f, o, s, p
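# For orientation (values derived from the lists above with the default
# shrink=30): the first yielded tuple is
#   ("maxpool2", [32, 71, 71, 6], [1, 3, 3, 1], [32, 35, 35, 6],
#    [1, 2, 2, 1], "VALID")
# i.e. the Inception depth of 192 is reduced to 192 // 30 = 6.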
class PoolingTest(test.TestCase):
def _VerifyOneType(self, pool_func, input_sizes, ksize, strides, padding,
data_format, data_type, expected, use_gpu, v2):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
data_type: The data type to use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
total_size = 1
for s in input_sizes:
total_size *= s
if v2 and data_format != "NHWC":
tf_logging.info("v2 not supported for %s", data_format)
return
if data_format == "NCHW_VECT_C":
if data_type != dtypes.float32:
tf_logging.info("quantization to qint8 not implemented for %r",
data_type)
return
if input_sizes[-1] % 4 != 0:
tf_logging.info("Skipping test for depth %d", input_sizes[-1])
return
tf_logging.info("Running %s test. %r %r %d %r %r %r %s", data_format, v2,
input_sizes, total_size, pool_func, ksize, strides,
data_type)
# Initializes the input tensor with array containing incrementing
# numbers from 1, wrapping round to -127 after 127 to support int8.
x = [((f + 128) % 255) - 127 for f in range(total_size)]
with self.test_session(use_gpu=use_gpu):
t = constant_op.constant(x, shape=input_sizes, dtype=data_type)
if data_format in ("NCHW", "NCHW_VECT_C"):
if data_format == "NCHW_VECT_C":
t = test_util.NHWCToNCHW_VECT_C(t)
t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
else:
t = test_util.NHWCToNCHW(t)
ksize = test_util.NHWCToNCHW(ksize)
strides = test_util.NHWCToNCHW(strides)
ksize_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
strides_placeholder = array_ops.placeholder(dtypes.int32, shape=[4])
if v2:
t = pool_func(
t,
ksize=ksize_placeholder,
strides=strides_placeholder,
padding=padding,
data_format=data_format)
else:
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW_VECT_C":
t = gen_array_ops.dequantize(t, -128, 127)
t = test_util.NCHW_VECT_CToNHWC(t)
elif data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
if v2:
actual = t.eval(feed_dict={
ksize_placeholder: ksize,
strides_placeholder: strides
})
else:
actual = t.eval()
self.assertShapeEqual(actual, t)
self.assertAllCloseAccordingToType(expected, actual.flatten())
def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
data_format: The data format we use to run the pooling operation.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
if data_format == "NCHW_VECT_C":
avg_pool_func = nn_ops.avg_pool
tf_logging.info("pool_func=%s", pool_func)
if pool_func == avg_pool_func:
tf_logging.info("NCHW_VECT_C not yet implemented for avg_pool")
return
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float32, expected, use_gpu, v2)
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float64, expected, use_gpu, v2)
if not use_gpu or test_util.CudaSupportsHalfMatMulAndConv():
self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
data_format, dtypes.float16, expected, use_gpu, v2)
def _VerifyValues(self,
pool_func,
input_sizes,
ksize,
strides,
padding,
expected,
use_gpu,
v2=False):
"""Verifies the output values of the pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
ksize: The kernel size dimensions
strides: The stride dimensions
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether we are running on GPU.
"""
for (data_format, use_gpu_2) in GetTestConfigs(True):
if use_gpu_2 == use_gpu:
self._VerifyOneTest(pool_func, input_sizes, ksize, strides, padding,
data_format, expected, use_gpu, v2)
def _testAvgPoolValidPadding(self, use_gpu):
expected_output = [7.0, 8.0, 9.0]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePadding(self, use_gpu):
expected_output = [8.5, 9.5, 10.5, 14.5, 15.5, 16.5]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 4, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# Window of [x, x] should do:
# [avg(1.0, 2.0), avg(2.0, padded0),
# avg(3.0, 4.0), avg(4.0, padded0)]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[1.5, 2.0, 3.5, 4.0],
use_gpu=use_gpu)
# Window of [x,
# x] should do:
# [avg(1.0, 3.0), avg(2.0, 4.0)
# avg(3.0, padded0), avg(4.0, padded0)]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 3.0, 3.0, 4.0],
use_gpu=use_gpu)
def _testAvgPoolSamePaddingNonSquareWindowMultiBatch(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 2],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
2.0, 3.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 10.0, 11.0, 11.0, 12.0,
14.0, 15.0, 15.0, 16.0
],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 2],
ksize=[1, 2, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[
3.0, 4.0, 5.0, 6.0, 5.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0,
13.0, 14.0, 15.0, 16.0
],
use_gpu=use_gpu)
def _testAvgPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 16.0, 17.0, 18.0],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
use_gpu=use_gpu)
def _testAvgPoolSamePadding4(self, use_gpu):
expected_output = [
11.0, 12.0, 13.0, 14.0, 19.0, 20.0, 21.0, 22.0, 43.0, 44.0, 45.0, 46.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket4(self, use_gpu):
expected_output = [
21.0, 22.0, 23.0, 24.0, 27.0, 28.0, 29.0, 30.0, 45.0, 46.0, 47.0, 48.0,
51.0, 52.0, 53.0, 54.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolSamePaddingPacket8(self, use_gpu):
expected_output = [
-12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, 4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0,
32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, -3.5, -54.0, -53.0, -52.0,
-51.0, -50.0, -49.0, -48.0, -47.0, -38.0, -37.0, -36.0, -35.0, -34.0,
-33.0, -32.0, -31.0, -22.0, -21.0, -20.0, -19.0, -18.0, -17.0, -16.0,
-15.0, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -4.0, -3.0, -11.0, -10.0,
-9.0, -8.0, -7.0, -6.0, -5.0, -4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0,
12.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 33.0, 34.0, 35.0,
36.0, 37.0, 38.0, -3.5, -2.5, -85.0, -84.0, -83.0, -82.0, -81.0, -80.0,
-79.0, -78.0, -69.0, -68.0, -67.0, -66.0, -65.0, -64.0, -63.0, -62.0,
-53.0, -52.0, -51.0, -50.0, -49.0, -48.0, -47.0, -46.0, -41.0, -40.0,
-39.0, -38.0, -37.0, -36.0, -35.0, -34.0
]
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
def _testAvgPoolEmptyInput(self, use_gpu):
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
use_gpu=use_gpu)
def testAvgPooling(self):
for use_gpu in True, False:
self._testAvgPoolValidPadding(use_gpu)
self._testAvgPoolSamePadding(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindow(use_gpu)
self._testAvgPoolSamePaddingNonSquareWindowMultiBatch(use_gpu)
self._testAvgPoolValidPaddingUnevenStride(use_gpu)
self._testAvgPoolSamePadding4(use_gpu)
self._testAvgPoolSamePaddingPacket4(use_gpu)
self._testAvgPoolSamePaddingPacket8(use_gpu)
self._testAvgPoolEmptyInput(use_gpu)
def _testMaxPoolValidPadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="VALID",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePadding(self, use_gpu):
expected_output = [13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 3, 3],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingNonSquareWindow(self, use_gpu):
# input is:
# [1.0, 2.0
# 3.0 4.0]
#
# A window of [1, 2] (one row, two columns) with SAME padding should do:
#
# [max(1.0, 2.0), max(2.0, padded0),
# max(3.0, 4.0), max(4.0, padded0)]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 1],
ksize=[1, 1, 2, 1],
strides=[1, 1, 1, 1],
padding="SAME",
expected=[2.0, 2.0, 4.0, 4.0],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolValidPaddingUnevenStride(self, use_gpu):
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu)
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 1, 2, 1],
padding="VALID",
expected=[6.0, 8.0, 10.0, 12.0, 14.0, 16.0],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 2, 2, 1],
strides=[1, 2, 1, 1],
padding="VALID",
expected=[6.0, 7.0, 8.0, 14.0, 15.0, 16.0],
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingPacket4(self, use_gpu):
expected_output = [
21.0, 22.0, 23.0, 24.0, 29.0, 30.0, 31.0, 32.0, 53.0, 54.0, 55.0, 56.0,
61.0, 62.0, 63.0, 64.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 4],
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolSamePaddingPacket8(self, use_gpu):
expected_output = [
81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 97.0, 98.0, 99.0, 100.0,
101.0, 102.0, 103.0, 104.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0,
119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0, 120.0,
18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 34.0, 35.0, 36.0, 37.0,
38.0, 39.0, 40.0, 41.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,
58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 82.0, 83.0, 84.0, 85.0,
86.0, 87.0, 88.0, 89.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0,
105.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0,
123.0, 124.0, 125.0, 126.0, 127.0, 120.0, 121.0, -45.0, -44.0, -43.0,
-42.0, -41.0, -40.0, -39.0, -38.0, -29.0, -28.0, -27.0, -26.0, -25.0,
-24.0, -23.0, -22.0, -13.0, -12.0, -11.0, -10.0, -9.0, -8.0, -7.0, -6.0,
-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0
]
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=expected_output,
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolEmptyInput(self, use_gpu):
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[0, 8, 8, 8],
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[],
use_gpu=use_gpu)
def testMaxPooling(self):
for use_gpu in True, False:
self._testMaxPoolValidPadding(use_gpu)
self._testMaxPoolSamePadding(use_gpu)
self._testMaxPoolSamePaddingNonSquareWindow(use_gpu)
self._testMaxPoolValidPaddingUnevenStride(use_gpu)
self._testMaxPoolSamePaddingPacket4(use_gpu)
self._testMaxPoolSamePaddingPacket8(use_gpu)
self._testMaxPoolEmptyInput(use_gpu)
# Tests for DepthwiseMaxPooling on CPU only.
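# (Depthwise pooling, i.e. a window and stride along the channel dimension, is
# only implemented for CPU devices; the GPU path rejects such configurations, as
# verified in testDepthwiseMaxPoolInvalidConfigs below.)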
def testDepthwiseMaxPool1x1DepthWindow1(self):
# input is:
# [1.0, ..., 10.0] along depth,
#
# We maxpool by depth in patches of 2.
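# For example: max(1, 2) = 2, max(3, 4) = 4, ..., max(9, 10) = 10, which matches
# the expected output [2.0, 4.0, 6.0, 8.0, 10.0] below.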
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
use_gpu=False)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 1, 1, 10],
ksize=[1, 1, 1, 2],
strides=[1, 1, 1, 2],
padding="SAME",
expected=[2.0, 4.0, 6.0, 8.0, 10.0],
use_gpu=False,
v2=v2)
def testDepthwiseMaxPool2x2DepthWindow3(self):
# input is:
#
# a 2x2x6 cube, and we depthwise max across 3 to produce a 2x2x2
# output. Each node has contiguous values, so the depthwise max
# should be multiples of 3.0.
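# For example, the first depth window covers the values (1, 2, 3) and yields 3.0,
# the next covers (4, 5, 6) and yields 6.0, and so on up to 24.0.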
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 2, 2, 6],
ksize=[1, 1, 1, 3],
strides=[1, 1, 1, 3],
padding="SAME",
expected=[3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0],
use_gpu=False,
v2=v2)
def testKernelSmallerThanStrideValid(self):
for use_gpu in [True, False]:
self._VerifyValues(
nn_ops.max_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[9, 12, 30, 33],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
nn_ops.avg_pool,
input_sizes=[1, 7, 7, 1],
ksize=[1, 2, 2, 1],
strides=[1, 3, 3, 1],
padding="VALID",
expected=[5, 8, 26, 29],
use_gpu=use_gpu)
def testKernelSmallerThanStrideSame(self):
for use_gpu in [True, False]:
for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
self._VerifyValues(
pool_func,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
use_gpu=use_gpu)
self._VerifyValues(
pool_func,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
use_gpu=use_gpu)
for v2 in [True, False]:
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 3, 3, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 7, 9],
use_gpu=use_gpu,
v2=v2)
self._VerifyValues(
gen_nn_ops.max_pool_v2,
input_sizes=[1, 4, 4, 1],
ksize=[1, 1, 1, 1],
strides=[1, 2, 2, 1],
padding="SAME",
expected=[1, 3, 9, 11],
use_gpu=use_gpu,
v2=v2)
def _testDepthwiseMaxPoolInvalidConfig(self,
in_size,
ksize,
strides,
error_msg,
use_gpu=False):
with self.test_session(use_gpu=use_gpu):
t = constant_op.constant(1.0, shape=in_size)
with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
t = nn_ops.max_pool(
t, ksize=ksize, strides=strides, padding="SAME").eval()
def testDepthwiseMaxPoolInvalidConfigs(self):
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 2, 2, 2], [1, 1, 1, 2],
"exactly one of pooling across depth")
self._testDepthwiseMaxPoolInvalidConfig(
[1, 2, 2, 4], [1, 1, 1, 2], [1, 1, 1, 1],
"depth window to equal the depth stride")
self._testDepthwiseMaxPoolInvalidConfig([1, 2, 2, 4], [1, 1, 1, 3],
[1, 1, 1, 3], "evenly divide")
if test.is_gpu_available():
with self.test_session(use_gpu=True):
t = variables.Variable(np.ones([1, 2, 2, 4]))
variables.global_variables_initializer().run()
with self.assertRaisesOpError("for CPU devices"):
nn_ops.max_pool(
t, ksize=[1, 1, 1, 2], strides=[1, 1, 1, 2],
padding="SAME").eval()
# The following are tests that verify that the CPU and GPU implementations
# produce the same results.
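# Each comparison evaluates the same random input twice: once in a GPU session
# via the *_with_argmax ops, and once in a CPU session via the plain
# max_pool / max_pool_grad ops, then checks that both paths produce (almost)
# identical values with assertAllCloseAccordingToType.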
def _CompareMaxPoolingFwd(self, input_shape, ksize, strides, padding):
for dtype in np.float64, np.float32, np.float16:
tensor_input = np.random.rand(*input_shape).astype(dtype)
with self.test_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op, _ = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
cpu_val = out_op.eval()
self.assertAllCloseAccordingToType(cpu_val, gpu_val)
def _CompareMaxPoolingBk(self, input_shape, output_shape, ksize, strides,
padding):
for dtype in np.float64, np.float32, np.float16:
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
tensor_output = np.random.rand(*output_shape).astype(dtype)
with self.test_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = constant_op.constant(tensor_output, shape=output_shape)
out_op = gen_nn_ops.max_pool_grad(t, orig_out, grad_in, ksize, strides,
padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def _CompareMaxPoolingGradBk(self, input_shape, output_shape, ksize, strides,
padding):
for dtype in np.float64, np.float32, np.float16:
# Generate numbers in a narrow range, so that there are many duplicates
# in the input.
tensor_input = np.random.random_integers(0, 3, input_shape).astype(dtype)
with self.test_session(use_gpu=True):
t = constant_op.constant(tensor_input, shape=input_shape)
_, argmax_op = nn_ops.max_pool_with_argmax(t, ksize, strides, padding)
argmax = argmax_op.eval()
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
t, grad_in, argmax, ksize, strides, padding)
gpu_val = out_op.eval()
self.assertShapeEqual(gpu_val, out_op)
with self.test_session(use_gpu=False):
t = constant_op.constant(tensor_input, shape=input_shape)
out_op = nn_ops.max_pool(t, ksize, strides, padding)
orig_out = out_op.eval()
grad_in = constant_op.constant(tensor_input, shape=input_shape)
out_op = gen_nn_ops.max_pool_grad_grad(t, orig_out, grad_in, ksize,
strides, padding)
cpu_val = out_op.eval()
self.assertShapeEqual(cpu_val, out_op)
# The CPU version accumulates its gradient on fp16, so it's less
# accurate than the GPU version that does the accumulation on fp32
self.assertAllCloseAccordingToType(
cpu_val, gpu_val, half_rtol=0.01, half_atol=0.01)
def testMaxPoolingWithArgmax(self):
tensor_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
with self.test_session(use_gpu=True) as sess:
t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1])
out_op, argmax_op = nn_ops.max_pool_with_argmax(
t,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
Targmax=dtypes.int64,
padding="VALID")
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
self.assertAllClose(out.ravel(), [1.0, 1.0, 1.0, 1.0])
self.assertAllEqual(argmax.ravel(), [0, 1, 3, 5])
def testMaxPoolingGradWithArgmax(self):
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True):
orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[1, 2, 2, 1])
argmax = constant_op.constant(
tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_with_argmax(
orig_in,
t,
argmax,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID")
out = out_op.eval().flatten()
self.assertAllClose(out,
[11.0, 12.0, 0.0, 13.0, 0.0, 14.0, 0.0, 0.0, 0.0])
def testMaxPoolingGradGradWithArgmax(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
orig_input = [1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0]
tensor_input = [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0]
tensor_argmax = list(np.array([0, 1, 3, 5], dtype=np.int64))
with self.test_session(use_gpu=True):
orig_in = constant_op.constant(orig_input, shape=[1, 3, 3, 1])
t = constant_op.constant(tensor_input, shape=[1, 3, 3, 1])
argmax = constant_op.constant(
tensor_argmax, shape=[1, 2, 2, 1], dtype=dtypes.int64)
out_op = gen_nn_ops.max_pool_grad_grad_with_argmax(
orig_in,
t,
argmax,
ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1],
padding="VALID")
out = out_op.eval().flatten()
self.assertAllClose(out, [11.0, 12.0, 14.0, 16.0])
def _ConstructAndTestGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the gradients of the avg pooling function.
Args:
pool_func: Function to be called, co.MaxPool, co.AvgPool,
or the Lua version.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-4
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-3
if data_format == "NCHW":
ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t,
output_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _ConstructAndTestSecondGradient(self,
pool_func,
input_sizes,
output_sizes,
window_rows,
window_cols,
row_stride,
col_stride,
padding,
data_format,
use_gpu,
x_init_value=None):
"""Verifies the second-order gradients of the pooling function.
Args:
pool_func: Function to be called, e.g. nn_ops.max_pool or nn_ops.avg_pool.
input_sizes: Input tensor dimensions.
output_sizes: Output tensor dimensions.
window_rows: kernel size in row dim
window_cols: kernel size in col dim
row_stride: Row Stride.
col_stride: Col Stride.
padding: Padding type.
data_format: Data format.
use_gpu: whether we are running on GPU
x_init_value: Values to be passed to the gradient checker.
"""
assert input_sizes[0] == output_sizes[0]
assert input_sizes[3] == output_sizes[3]
total_size = 1
for s in input_sizes:
total_size *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x = [f * 1.0 for f in range(1, total_size + 1)]
with self.test_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(x, shape=input_sizes, name="input")
if pool_func == nn_ops.avg_pool:
func_name = "avg_pool"
err_tolerance = 1e-3
else:
if x_init_value is None:
x_init_value = np.asfarray(
np.arange(1, total_size + 1),
dtype=np.float32).reshape(input_sizes)
func_name = "max_pool"
err_tolerance = 1e-2
if data_format == "NCHW":
ksize = [1, 1, window_rows, window_cols]
strides = [1, 1, row_stride, col_stride]
t = test_util.NHWCToNCHW(input_tensor)
else:
ksize = [1, window_rows, window_cols, 1]
strides = [1, row_stride, col_stride, 1]
t = input_tensor
t = pool_func(
t,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=func_name)
if data_format == "NCHW":
t = test_util.NCHWToNHWC(t)
t_g = gradients_impl.gradients(t**2, input_tensor)[0]
err = gradient_checker.compute_gradient_error(
input_tensor,
input_sizes,
t_g,
input_sizes,
x_init_value=x_init_value,
delta=1e-2)
tf_logging.info("%s second-order gradient error = " % func_name, err)
self.assertLess(err, err_tolerance)
def _testMaxPoolGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 2, 2, 1],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding1_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def testMaxPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradValidPadding1_2(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding1_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradSamePadding3_1(data_format, use_gpu)
def _MaxPoolGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding, v2):
"""Max Pooling Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x rows x cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
padding: PoolingOpDef.Padding. Padding type.
v2: bool. Whether to use the MaxPoolGradV2 op instead of MaxPoolGrad.
Returns:
A Tensor.
"""
pool_func = gen_nn_ops.max_pool_grad_v2 if v2 else gen_nn_ops.max_pool_grad
return pool_func(orig_input, orig_output, grad,
[1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
def _testMaxPoolGradDirect(self, input_data, output_backprop,
expected_input_backprop, input_sizes, output_sizes,
window_rows, window_cols, row_stride, col_stride,
padding, use_gpu, v2):
pool_func = gen_nn_ops.max_pool_v2 if v2 else nn_ops.max_pool
with self.test_session(use_gpu=use_gpu):
input_tensor = variables.Variable(
np.array(input_data, dtype=np.float32).reshape(input_sizes))
variables.global_variables_initializer().run()
output_tensor = pool_func(input_tensor, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
output_backprop_tensor = constant_op.constant(
output_backprop, shape=output_sizes)
input_backprop_tensor = self._MaxPoolGrad(
input_tensor, output_tensor, output_backprop_tensor, window_rows,
window_cols, row_stride, col_stride, padding, v2)
actual_input_backprop = input_backprop_tensor.eval()
self.assertShapeEqual(actual_input_backprop, input_backprop_tensor)
actual_input_backprop = actual_input_backprop.flatten()
actual_input_backprop = self._GetNdArray(actual_input_backprop)
actual_output = output_tensor.eval().flatten()
actual_output = self._GetNdArray(actual_output)
self.assertAllClose(
expected_input_backprop, actual_input_backprop, rtol=1e-6, atol=1e-6)
def _testMaxPoolGradDirect1_1(self):
input_data = [
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_2(self):
input_data = [
1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0,
0.0, 1.0
]
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
expected_input_backprop = [
11.0, 0.0, 25.0, 0.0, 0.0, 31.0, 0.0, 17.0, 19.0, 0.0, 41.0, 0.0, 0.0,
0.0, 0.0, 0.0
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirect1_3(self):
input_data = [
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
0.0,
1.0,
]
output_backprop = [
11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0,
23.0, 24.0, 25.0, 26.0
]
expected_input_backprop = [
54,
0.0,
62,
0.0,
0.0,
60,
0.0,
22.0,
47,
0.0,
51,
0.0,
0.0,
0.0,
0.0,
0.0,
]
for use_gpu in True, False:
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 4, 4, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
use_gpu=use_gpu,
v2=v2)
def _testMaxPoolGradDirectWithNans2_1(self):
input_data = [float("nan")] * 16
output_backprop = [11.0, 12.0, 13.0, 15.0, 16.0, 17.0, 19.0, 20.0, 21.0]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
11.0, 12.0, 13.0, 0.0, 15.0, 16.0, 17.0, 0.0, 19.0, 20.0, 21.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
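# The TF_ENABLE_MAXPOOL_NANPROP environment variable toggles whether the
# cudnn-based kernels propagate gradients at NaN positions: with "0" those
# positions receive zero gradients, with "1" the result should match the CPU
# behaviour checked above.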
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
def _testMaxPoolGradDirectWithNans2_2(self):
input_data = [float("nan")] * 16
output_backprop = [
float("nan"), 12.0, 13.0, 15.0,
float("nan"), 17.0, 19.0, 20.0,
float("nan")
]
# Test the CPU implementation, which propagates diffs in case of NaN
expected_input_backprop_tf_cpu = [
float("nan"), 12.0, 13.0, 0.0, 15.0,
float("nan"), 17.0, 0.0, 19.0, 20.0,
float("nan"), 0.0, 0.0, 0.0, 0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_tf_cpu,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=False,
v2=v2)
if not test.is_gpu_available():
return
# Test the GPU implementation that uses cudnn for now.
saved_nanprop = os.environ.get("TF_ENABLE_MAXPOOL_NANPROP")
# Do not propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "0"
expected_input_backprop_cudnn = [
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
# Propagate the diff in cases of NaNs
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = "1"
expected_input_backprop_cudnn = expected_input_backprop_tf_cpu
for v2 in [True, False]:
self._testMaxPoolGradDirect(
input_data,
output_backprop,
expected_input_backprop_cudnn,
input_sizes=[1, 4, 4, 1],
output_sizes=[1, 3, 3, 1],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
use_gpu=True,
v2=v2)
if saved_nanprop:
os.environ["TF_ENABLE_MAXPOOL_NANPROP"] = saved_nanprop
else:
del os.environ["TF_ENABLE_MAXPOOL_NANPROP"]
def testMaxPoolGradDirect(self):
self._testMaxPoolGradDirect1_1()
self._testMaxPoolGradDirect1_2()
self._testMaxPoolGradDirect1_3()
self._testMaxPoolGradDirectWithNans2_1()
self._testMaxPoolGradDirectWithNans2_2()
def _testMaxPoolGradGradValidPadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 3, 3, 1],
output_sizes=[1, 3, 3, 1],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_6(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 6, 6, 3],
output_sizes=[2, 5, 5, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_1_7(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 7, 7, 3],
output_sizes=[2, 6, 6, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradValidPadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding1_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding2_2(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testMaxPoolGradGradSamePadding3_1(self, data_format, use_gpu):
for pool_func in [gen_nn_ops.max_pool_v2, nn_ops.max_pool]:
self._ConstructAndTestSecondGradient(
pool_func,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def testMaxPoolGradGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testMaxPoolGradGradValidPadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_6(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_1_7(data_format, use_gpu)
self._testMaxPoolGradGradValidPadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding1_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_1(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding2_2(data_format, use_gpu)
self._testMaxPoolGradGradSamePadding3_1(data_format, use_gpu)
def _MaxPoolGradGrad(self, orig_input, orig_output, grad, window_rows,
window_cols, row_stride, col_stride, padding):
"""Max Pooling Second-Order Gradient.
Args:
orig_input: A float Tensor. The original input tensor.
orig_output: A float Tensor. The original output tensor.
grad: A float Tensor.
The 4D (batch x out_rows x out_cols x depth) output backprop.
window_rows: integer. Kernel size along rows dimension.
window_cols: integer. Kernel size along cols dimension.
row_stride: integer. Stride along rows dimension
col_stride: integer. Stride along cols dimension
padding: PoolingOpDef.Padding. Padding type.
Returns:
A Tensor.
"""
return gen_nn_ops.max_pool_grad_grad(
orig_input, orig_output, grad, [1, window_rows, window_cols, 1],
[1, row_stride, col_stride, 1], padding)
def testAvgPoolGrad(self):
for (data_format, use_gpu) in GetTestConfigs():
self._testAvgPoolGradValidPadding1_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding1_2(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_1(data_format, use_gpu)
self._testAvgPoolGradValidPadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding1_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_1(data_format, use_gpu)
self._testAvgPoolGradSamePadding2_2(data_format, use_gpu)
self._testAvgPoolGradSamePadding3_1(data_format, use_gpu)
def _testAvgPoolGradValidPadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 3, 3, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 3, 3, 3],
output_sizes=[2, 2, 2, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradValidPadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 2, 3],
output_sizes=[2, 1, 1, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="VALID",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=1,
window_cols=1,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding1_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=1,
window_cols=1,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 2, 4, 3],
window_rows=2,
window_cols=2,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding2_2(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[2, 2, 4, 3],
output_sizes=[2, 1, 2, 3],
window_rows=2,
window_cols=2,
row_stride=2,
col_stride=2,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def _testAvgPoolGradSamePadding3_1(self, data_format, use_gpu):
self._ConstructAndTestGradient(
nn_ops.avg_pool,
input_sizes=[1, 7, 7, 1],
output_sizes=[1, 7, 7, 1],
window_rows=3,
window_cols=3,
row_stride=1,
col_stride=1,
padding="SAME",
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
for pool_func in [nn_ops.max_pool, nn_ops.avg_pool]:
p = pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
p, am = nn_ops.max_pool_with_argmax(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], p.get_shape().as_list())
self.assertEqual([None, None, None, None], am.get_shape().as_list())
# Incorrect input shape.
for pool_func in [
nn_ops.max_pool, nn_ops.avg_pool, nn_ops.max_pool_with_argmax
]:
with self.assertRaises(ValueError):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
ksize=[1, 1, 1, 1],
strides=[1, 1, 1, 1],
padding="SAME")
def testOpEdgeCases(self):
with self.test_session(use_gpu=test.is_gpu_available()) as sess:
pool_funcs = [nn_ops.max_pool, nn_ops.avg_pool]
if test.is_gpu_available():
pool_funcs.append(nn_ops.max_pool_with_argmax)
for pool_func in pool_funcs:
if pool_func != nn_ops.max_pool:
# Illegal strides.
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
"Pooling is not yet supported on the batch"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32),
ksize=[1, 1, 1, 1],
strides=[2, 1, 1, 1],
padding="SAME"))
# Filter too large.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 20, 21, 1],
strides=[1, 1, 1, 1],
padding="VALID"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
pool_func(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
ksize=[1, 21, 20, 1],
strides=[1, 1, 1, 1],
padding="VALID")
def GetMaxPoolFwdTest(input_size, filter_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingFwd(input_size, filter_size, strides, padding)
return Test
def GetMaxPoolGradTest(input_size, filter_size, output_size, strides, padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingBk(input_size, output_size, filter_size, strides,
padding)
return Test
def GetMaxPoolGradGradTest(input_size, filter_size, output_size, strides,
padding):
def Test(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
self._CompareMaxPoolingGradBk(input_size, output_size, filter_size, strides,
padding)
return Test
if __name__ == "__main__":
for (name_, input_size_, filter_size_, output_size_, stride_,
padding_) in GetShrunkInceptionMaxPoolShapes():
setattr(PoolingTest, "testMaxPoolFwd_" + name_,
GetMaxPoolFwdTest(input_size_, filter_size_, stride_, padding_))
setattr(PoolingTest, "testMaxPoolGrad_" + name_,
GetMaxPoolGradTest(input_size_, filter_size_, output_size_, stride_,
padding_))
setattr(PoolingTest, "testMaxPoolGradGrad_" + name_,
GetMaxPoolGradGradTest(input_size_, filter_size_, output_size_,
stride_, padding_))
test.main()
|
dsajkl/123
|
refs/heads/master
|
common/test/acceptance/pages/studio/settings_group_configurations.py
|
10
|
"""
Course Group Configurations page.
"""
from bok_choy.promise import EmptyPromise
from .course_page import CoursePage
from .utils import confirm_prompt
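# Typical interaction from an acceptance test (illustrative sketch; the
# constructor arguments are whatever the underlying CoursePage expects):
#   page = GroupConfigurationsPage(browser, ...)
#   page.visit()
#   page.create()
#   first_config = page.group_configurations[0]
#   first_config.toggle()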
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
def is_browser_on_page(self):
"""
Verify that the browser is on the page and it is not still loading.
"""
EmptyPromise(
lambda: self.q(css='body.view-group-configurations').present,
'On the group configuration page'
).fulfill()
EmptyPromise(
lambda: not self.q(css='span.spin').visible,
'Group Configurations are finished loading'
).fulfill()
return True
@property
def group_configurations(self):
"""
Return list of the group configurations for the course.
"""
css = '.group-configurations-list-item'
return [GroupConfiguration(self, index) for index in xrange(len(self.q(css=css)))]
def create(self):
"""
Creates new group configuration.
"""
self.q(css=".new-button").first.click()
@property
def no_group_configuration_message_is_present(self):
return self.q(css='.wrapper-content .no-group-configurations-content').present
@property
def no_group_configuration_message_text(self):
return self.q(css='.wrapper-content .no-group-configurations-content').text[0]
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, index):
self.page = page
self.SELECTOR = '.group-configurations-list-item-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
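# e.g. for index 0, get_selector('.group-name') returns
# '.group-configurations-list-item-0 .group-name'.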
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
self.find_css('a.group-toggle').first.click()
@property
def is_expanded(self):
"""
Group configuration usage information is expanded.
"""
return self.find_css('a.group-toggle.hide-groups').present
def add_group(self):
"""
Add new group.
"""
self.find_css('button.action-add-group').first.click()
def get_text(self, css):
"""
Return text for the element defined by the css locator.
"""
return self.find_css(css).first.text[0]
def click_outline_anchor(self):
"""
Click on the `Course Outline` link.
"""
self.find_css('p.group-configuration-usage-text a').first.click()
def click_unit_anchor(self, index=0):
"""
Click on the link to the unit.
"""
self.find_css('li.group-configuration-usage-unit a').nth(index).click()
def edit(self):
"""
Open editing view for the group configuration.
"""
self.find_css('.action-edit .edit').first.click()
@property
def delete_button_is_disabled(self):
return self.find_css('.actions .delete.is-disabled').present
@property
def delete_button_is_absent(self):
return not self.find_css('.actions .delete').present
def delete(self):
"""
Delete the group configuration.
"""
self.find_css('.actions .delete').first.click()
confirm_prompt(self.page)
def save(self):
"""
Save group configuration.
"""
self.find_css('.action-primary').first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
self.find_css('.action-secondary').first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.group-configuration-edit').present:
return 'edit'
elif self.find_css('.group-configuration-details').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def usages(self):
"""
Return list of usages.
"""
css = '.group-configuration-usage-unit'
return self.find_css(css).text
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.group-configuration-title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
self.find_css('.group-configuration-name-input').first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
"""
Set group configuration description.
"""
self.find_css('.group-configuration-description-input').first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
def group_selector(group_index):
return self.get_selector('.group-{} '.format(group_index))
return [Group(self.page, group_selector(index)) for index, element in enumerate(self.find_css('.group'))]
@property
def delete_note(self):
"""
Return delete note for the group configuration.
"""
return self.find_css('.wrapper-delete-button').first.attrs('data-tooltip')[0]
@property
def details_error_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .icon-exclamation-sign').present
@property
def details_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .icon-warning-sign').present
@property
def details_message_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').present
@property
def details_message_text(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').text[0]
@property
def edit_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .icon-warning-sign').present
@property
def edit_warning_message_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').present
@property
def edit_warning_message_text(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
Return the name of the group.
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@name.setter
def name(self, value):
"""
Set the name for the group.
"""
css = '.group-name'
self.find_css(css).first.fill(value)
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def remove(self):
"""
Remove the group.
"""
css = '.action-close'
return self.find_css(css).first.click()
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
|
agati/chimera
|
refs/heads/master
|
src/chimera/core/tests/managerhelperwithinitexception.py
|
6
|
from chimera.core.chimeraobject import ChimeraObject
class ManagerHelperWithInitException (ChimeraObject):
def __init__ (self):
ChimeraObject.__init__(self)
raise Exception("oops in __init__")
def __start__ (self):
return True
def foo (self):
return 42
|
vpodzime/pyparted
|
refs/heads/master
|
tests/test__ped_constraint.py
|
6
|
#
# Copyright (C) 2009-2011 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
# David Cantrell <dcantrell@redhat.com>
#
import _ped
from tests.baseclass import RequiresDevice
# One class per method, multiple tests per class. For these simple methods,
# that seems like good organization. More complicated methods may require
# multiple classes and their own test suite.
class ConstraintNewTestCase(RequiresDevice):
def runTest(self):
align1 = _ped.Alignment(10, 5)
align2 = _ped.Alignment(10, 5)
geom1 = _ped.Geometry(self._device, 0, 50)
geom2 = _ped.Geometry(self._device, 25, 50)
# Check that not passing enough args to _ped.Constraint.__init__ is caught.
self.assertRaises(TypeError, _ped.Constraint)
self.assertRaises(TypeError, _ped.Constraint, align1, align2)
# Or the parameters in the wrong order.
self.assertRaises(TypeError, _ped.Constraint, align1, align2, 10, 100,
geom1, geom2)
# And then the correct way of creating a _ped.Constraint.
c = _ped.Constraint(align1, align2, geom1, geom2, 10, 100)
self.assertIsInstance(c, _ped.Constraint)
class ConstraintGetSetTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
align1 = _ped.Alignment(10, 5)
align2 = _ped.Alignment(10, 5)
geom1 = _ped.Geometry(self._device, 0, 50)
geom2 = _ped.Geometry(self._device, 25, 50)
self.c = _ped.Constraint(align1, align2, geom1, geom2, min_size=10,
max_size=100)
def runTest(self):
# Test that passing the kwargs to __init__ works.
self.assertEqual(self.c.min_size, 10)
self.assertEqual(self.c.max_size, 100)
self.assertIsInstance(self.c.start_align, _ped.Alignment)
self.assertIsInstance(self.c.end_align, _ped.Alignment)
self.assertIsInstance(self.c.start_range, _ped.Geometry)
self.assertIsInstance(self.c.end_range, _ped.Geometry)
# Test that setting directly and getting with getattr works.
self.c.min_size = 15
self.c.max_size = 75
self.assertEqual(getattr(self.c, "min_size"), 15)
self.assertEqual(getattr(self.c, "max_size"), 75)
self.assertIsInstance(getattr(self.c, "start_align"), _ped.Alignment)
self.assertIsInstance(getattr(self.c, "end_align"), _ped.Alignment)
self.assertIsInstance(getattr(self.c, "start_range"), _ped.Geometry)
self.assertIsInstance(getattr(self.c, "end_range"), _ped.Geometry)
# Test that setting with setattr and getting directly works.
setattr(self.c, "min_size", 10)
setattr(self.c, "max_size", 90)
self.assertEqual(self.c.min_size, 10)
self.assertEqual(self.c.max_size, 90)
# Test that values have the right type.
self.assertRaises(TypeError, setattr, self.c, "min_size", "string")
# Test that looking for invalid attributes fails properly.
self.assertRaises(AttributeError, getattr, self.c, "blah")
# We really shouldn't be allowed to overwrite objects stored in a
# _ped.Constraint, but for now there's no way to prevent it.
self.c.end_range = 47
self.assertEqual(self.c.end_range, 47)
class ConstraintDuplicateTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
align1 = _ped.Alignment(10, 0)
align2 = _ped.Alignment(10, 0)
geom1 = _ped.Geometry(self._device, 0, 50)
geom2 = _ped.Geometry(self._device, 25, 50)
self.c = _ped.Constraint(align1, align2, geom1, geom2, min_size=10,
max_size=100)
self.dup = self.c.duplicate()
def runTest(self):
self.assertEqual(self.c.min_size, self.dup.min_size)
self.assertEqual(self.c.max_size, self.dup.max_size)
# duplicate methods should do a deepcopy, so self.dup should have
# different references, but the same contents.
self.assertNotEqual(repr(self.c), repr(self.dup))
self.assertNotEqual(repr(self.c.start_align), repr(self.dup.start_align))
self.assertEqual(self.c.start_align.offset, self.dup.start_align.offset)
self.assertEqual(self.c.start_align.grain_size, self.dup.start_align.grain_size)
self.assertNotEqual(repr(self.c.end_align), repr(self.dup.end_align))
self.assertEqual(self.c.end_align.offset, self.dup.end_align.offset)
self.assertEqual(self.c.end_align.grain_size, self.dup.end_align.grain_size)
self.assertNotEqual(repr(self.c.start_range), repr(self.dup.start_range))
self.assertNotEqual(repr(self.c.start_range.dev), repr(self.dup.start_range.dev))
self.assertEqual(self.c.start_range.dev.path, self.dup.start_range.dev.path)
self.assertEqual(self.c.start_range.start, self.dup.start_range.start)
self.assertEqual(self.c.start_range.length, self.dup.start_range.length)
self.assertEqual(self.c.start_range.end, self.dup.start_range.end)
self.assertNotEqual(repr(self.c.end_range), repr(self.dup.end_range))
self.assertNotEqual(repr(self.c.end_range.dev), repr(self.dup.end_range.dev))
self.assertEqual(self.c.end_range.dev.path, self.dup.end_range.dev.path)
self.assertEqual(self.c.end_range.start, self.dup.end_range.start)
self.assertEqual(self.c.end_range.length, self.dup.end_range.length)
self.assertEqual(self.c.end_range.end, self.dup.end_range.end)
class ConstraintIntersectTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
align1 = _ped.Alignment(10, 0)
align2 = _ped.Alignment(10, 0)
geom1 = _ped.Geometry(self._device, 0, 50)
geom2 = _ped.Geometry(self._device, 25, 50)
self.c1 = _ped.Constraint(align1, align2, geom1, geom2, min_size=10,
max_size=100)
geom3 = _ped.Geometry(self._device, 10, 50)
geom4 = _ped.Geometry(self._device, 30, 40)
self.c2 = _ped.Constraint(align1, align2, geom3, geom4, min_size=10,
max_size=100)
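# runTest builds the expected intersection by hand: intersect the alignments and
# ranges pairwise, take the larger min_size and the smaller max_size, and expect
# None if any pairwise intersection is empty.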
def runTest(self):
startAlign = self.c1.start_align.intersect(self.c2.start_align)
endAlign = self.c1.end_align.intersect(self.c2.end_align)
startRange = self.c1.start_range.intersect(self.c2.start_range)
endRange = self.c1.end_range.intersect(self.c2.end_range)
minSize = max(self.c1.min_size, self.c2.min_size)
maxSize = min(self.c1.max_size, self.c2.max_size)
if not startAlign or not endAlign or not startRange or not endRange:
expected = None
else:
expected = _ped.Constraint(startAlign, endAlign,
startRange, endRange,
min_size=minSize, max_size=maxSize)
result = self.c1.intersect(self.c2)
self.assertEqual(result, expected)
class ConstraintSolveMaxTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
self.c1 = self._device.get_constraint()
def runTest(self):
result = self.c1.solve_max()
self.assertEqual(result.dev, self._device)
self.assertGreaterEqual(result.length, self._device.length - 1)
class ConstraintSolveNearestTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
self.c1 = self._device.get_constraint()
self.g1 = _ped.Geometry(self._device, 1, 8)
def runTest(self):
result = self.c1.solve_nearest(self.g1)
self.assertEqual(result, self.g1)
class ConstraintIsSolutionTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
self.c1 = self._device.get_constraint()
self.g1 = _ped.Geometry(self._device, 1, 8)
def runTest(self):
self.assertTrue(self.c1.is_solution(self.g1))
class ConstraintStrTestCase(RequiresDevice):
def setUp(self):
RequiresDevice.setUp(self)
align1 = _ped.Alignment(10, 0)
align2 = _ped.Alignment(10, 0)
geom1 = _ped.Geometry(self._device, 0, 50)
geom2 = _ped.Geometry(self._device, 25, 50)
self.c1 = _ped.Constraint(align1, align2, geom1, geom2, min_size=10,
max_size=100)
def runTest(self):
result = str(self.c1).split('\n')
self.assertEqual(result[0], '_ped.Constraint instance --')
self.assertTrue(result[1].startswith(' start_align: <_ped.Alignment object at '))
self.assertNotEqual(result[1].find(' end_align: <_ped.Alignment object at '), -1)
self.assertTrue(result[2].startswith(' start_range: <_ped.Geometry object at '))
self.assertNotEqual(result[2].find(' end_range: <_ped.Geometry object at '), -1)
self.assertEqual(result[3], ' min_size: 10 max_size: 100')
|
openstack/octavia
|
refs/heads/master
|
octavia/controller/worker/v2/flows/pool_flows.py
|
1
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from taskflow.patterns import linear_flow
from octavia.common import constants
from octavia.controller.worker.v2.tasks import amphora_driver_tasks
from octavia.controller.worker.v2.tasks import database_tasks
from octavia.controller.worker.v2.tasks import lifecycle_tasks
class PoolFlows(object):
def get_create_pool_flow(self):
"""Create a flow to create a pool
:returns: The flow for creating a pool
"""
create_pool_flow = linear_flow.Flow(constants.CREATE_POOL_FLOW)
create_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL_ID,
constants.LISTENERS,
constants.LOADBALANCER]))
create_pool_flow.add(database_tasks.MarkPoolPendingCreateInDB(
requires=constants.POOL_ID))
create_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER_ID))
create_pool_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL_ID))
create_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER_ID, constants.LISTENERS)))
return create_pool_flow
def get_delete_pool_flow(self):
"""Create a flow to delete a pool
:returns: The flow for deleting a pool
"""
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW)
delete_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL_ID,
constants.LISTENERS,
constants.LOADBALANCER]))
delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
requires=constants.POOL_ID))
delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
requires=constants.POOL_ID, provides=constants.POOL_CHILD_COUNT))
delete_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER_ID))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
requires=constants.POOL_ID))
delete_pool_flow.add(database_tasks.DecrementPoolQuota(
requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT]))
delete_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER_ID, constants.LISTENERS)))
return delete_pool_flow
def get_delete_pool_flow_internal(self, pool_id):
"""Create a flow to delete a pool, etc.
:returns: The flow for deleting a pool
"""
delete_pool_flow = linear_flow.Flow(constants.DELETE_POOL_FLOW + '-' +
pool_id)
# health monitor should cascade
# members should cascade
delete_pool_flow.add(database_tasks.MarkPoolPendingDeleteInDB(
name='mark_pool_pending_delete_in_db_' + pool_id,
requires=constants.POOL_ID,
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.CountPoolChildrenForQuota(
name='count_pool_children_for_quota_' + pool_id,
requires=constants.POOL_ID,
provides=constants.POOL_CHILD_COUNT,
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.DeletePoolInDB(
name='delete_pool_in_db_' + pool_id,
requires=constants.POOL_ID,
inject={constants.POOL_ID: pool_id}))
delete_pool_flow.add(database_tasks.DecrementPoolQuota(
name='decrement_pool_quota_' + pool_id,
requires=[constants.PROJECT_ID, constants.POOL_CHILD_COUNT]))
return delete_pool_flow
def get_update_pool_flow(self):
"""Create a flow to update a pool
:returns: The flow for updating a pool
"""
update_pool_flow = linear_flow.Flow(constants.UPDATE_POOL_FLOW)
update_pool_flow.add(lifecycle_tasks.PoolToErrorOnRevertTask(
requires=[constants.POOL_ID,
constants.LISTENERS,
constants.LOADBALANCER]))
update_pool_flow.add(database_tasks.MarkPoolPendingUpdateInDB(
requires=constants.POOL_ID))
update_pool_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=constants.LOADBALANCER_ID))
update_pool_flow.add(database_tasks.UpdatePoolInDB(
requires=[constants.POOL_ID, constants.UPDATE_DICT]))
update_pool_flow.add(database_tasks.MarkPoolActiveInDB(
requires=constants.POOL_ID))
update_pool_flow.add(database_tasks.MarkLBAndListenersActiveInDB(
requires=(constants.LOADBALANCER_ID, constants.LISTENERS)))
return update_pool_flow
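# A minimal usage sketch, not part of the Octavia source: it only builds the
# flows defined above and inspects their declared inputs. In Octavia the
# controller worker supplies those inputs through a taskflow engine store;
# 'example-pool-id' below is a placeholder pool UUID.
if __name__ == '__main__':
    flows = PoolFlows()
    create_flow = flows.get_create_pool_flow()
    delete_flow = flows.get_delete_pool_flow_internal('example-pool-id')
    print(create_flow.name, sorted(create_flow.requires))
    print(delete_flow.name, sorted(delete_flow.requires))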
|
nijinashok/sos
|
refs/heads/master
|
sos/plugins/lustre.py
|
1
|
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin
class Lustre(Plugin, RedHatPlugin):
'''Lustre filesystem'''
plugin_name = 'lustre'
profiles = ('storage', 'network', 'cluster', )
packages = ('lustre', 'lustre-client', )
def get_params(self, name, param_list):
'''Use lctl get_param to collect a selection of parameters into a
file.
'''
self.add_cmd_output("lctl get_param %s" % " ".join(param_list),
suggest_filename="params-%s" % name,
stderr=False)
def setup(self):
self.add_cmd_output([
"lctl debug_kernel",
"lctl device_list",
"lctl list_nids",
"lctl route_list",
"lnetctl net show -v"
])
self.get_params("basic", ["version", "health_check", "debug"])
self.get_params("lnet", ["peers", "routes", "routers", "nis"])
self.get_params("ldlm-states", ["*.*.state"])
self.get_params("jobid", ["jobid_name", "jobid_var"])
# Client Specific
self.add_cmd_output([
"lfs df",
"lfs df -i"
])
# Server Specific
self.get_params("osd", ["osd-*.*.{mntdev,files*," +
"kbytes*,blocksize,brw_stats}"])
self.get_params("quota", ["osd-*.*.quota_slave." +
"{info,limit_*,acct_*}"])
# vim: set et ts=4 sw=4 :
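# Hedged illustration, not part of the plugin API: it only reproduces the
# string formatting get_params() uses when handing a command to
# add_cmd_output(), so it is safe to run (assuming sos itself is importable).
if __name__ == '__main__':
    param_list = ["peers", "routes", "routers", "nis"]
    print("lctl get_param %s" % " ".join(param_list))
    # -> lctl get_param peers routes routers nis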
|
lidavidm/mathics-heroku
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/vendor/html5lib/treebuilders/etree.py
|
79
|
from __future__ import absolute_import, division, unicode_literals
from pip.vendor.six import text_type
import re
from . import _base
from .. import ihatexml
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation, fullTree=False):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class Element(_base.Node):
def __init__(self, name, namespace=None):
self._name = name
self._namespace = namespace
self._element = ElementTree.Element(self._getETreeTag(name,
namespace))
if namespace is None:
self.nameTuple = namespaces["html"], self._name
else:
self.nameTuple = self._namespace, self._name
self.parent = None
self._childNodes = []
self._flags = []
def _getETreeTag(self, name, namespace):
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
return etree_tag
def _setName(self, name):
self._name = name
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getName(self):
return self._name
name = property(_getName, _setName)
def _setNamespace(self, namespace):
self._namespace = namespace
self._element.tag = self._getETreeTag(self._name, self._namespace)
def _getNamespace(self):
return self._namespace
namespace = property(_getNamespace, _setNamespace)
def _getAttributes(self):
return self._element.attrib
def _setAttributes(self, attributes):
# Delete existing attributes first
# XXX - there may be a better way to do this...
for key in list(self._element.attrib.keys()):
del self._element.attrib[key]
for key, value in attributes.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], key[1])
else:
name = key
self._element.set(name, value)
attributes = property(_getAttributes, _setAttributes)
def _getChildNodes(self):
return self._childNodes
def _setChildNodes(self, value):
del self._element[:]
self._childNodes = []
for element in value:
self.insertChild(element)
childNodes = property(_getChildNodes, _setChildNodes)
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self._element.text or len(self._element))
def appendChild(self, node):
self._childNodes.append(node)
self._element.append(node._element)
node.parent = self
def insertBefore(self, node, refNode):
index = list(self._element).index(refNode._element)
self._element.insert(index, node._element)
node.parent = self
def removeChild(self, node):
self._element.remove(node._element)
node.parent = None
def insertText(self, data, insertBefore=None):
if not(len(self._element)):
if not self._element.text:
self._element.text = ""
self._element.text += data
elif insertBefore is None:
# Insert the text as the tail of the last child element
if not self._element[-1].tail:
self._element[-1].tail = ""
self._element[-1].tail += data
else:
# Insert the text before the specified node
children = list(self._element)
index = children.index(insertBefore._element)
if index > 0:
if not self._element[index - 1].tail:
self._element[index - 1].tail = ""
self._element[index - 1].tail += data
else:
if not self._element.text:
self._element.text = ""
self._element.text += data
def cloneNode(self):
element = type(self)(self.name, self.namespace)
for name, value in self.attributes.items():
element.attributes[name] = value
return element
def reparentChildren(self, newParent):
if newParent.childNodes:
newParent.childNodes[-1]._element.tail += self._element.text
else:
if not newParent._element.text:
newParent._element.text = ""
if self._element.text is not None:
newParent._element.text += self._element.text
self._element.text = ""
_base.Node.reparentChildren(self, newParent)
class Comment(Element):
def __init__(self, data):
            # Build the wrapper element directly rather than going through the
            # superclass constructor, since comment nodes have no tag/namespace
self._element = ElementTree.Comment(data)
self.parent = None
self._childNodes = []
self._flags = []
def _getData(self):
return self._element.text
def _setData(self, value):
self._element.text = value
data = property(_getData, _setData)
class DocumentType(Element):
def __init__(self, name, publicId, systemId):
Element.__init__(self, "<!DOCTYPE>")
self._element.text = name
self.publicId = publicId
self.systemId = systemId
def _getPublicId(self):
return self._element.get("publicId", "")
def _setPublicId(self, value):
if value is not None:
self._element.set("publicId", value)
publicId = property(_getPublicId, _setPublicId)
def _getSystemId(self):
return self._element.get("systemId", "")
def _setSystemId(self, value):
if value is not None:
self._element.set("systemId", value)
systemId = property(_getSystemId, _setSystemId)
class Document(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_ROOT")
class DocumentFragment(Element):
def __init__(self):
Element.__init__(self, "DOCUMENT_FRAGMENT")
def testSerializer(element):
rv = []
def serializeElement(element, indent=0):
if not(hasattr(element, "tag")):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
rv.append("#document")
if element.text is not None:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
elif element.tag == ElementTreeCommentType:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
else:
assert isinstance(element.tag, text_type), \
"Expected unicode, got %s, %s" % (type(element.tag), element.tag)
nsmatch = tag_regexp.match(element.tag)
if nsmatch is None:
name = element.tag
else:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
name = "%s %s" % (prefix, name)
rv.append("|%s<%s>" % (' ' * indent, name))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = name
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
filter = ihatexml.InfosetFilter()
def serializeElement(element):
if isinstance(element, ElementTree.ElementTree):
element = element.getroot()
if element.tag == "<!DOCTYPE>":
if element.get("publicId") or element.get("systemId"):
publicId = element.get("publicId") or ""
systemId = element.get("systemId") or ""
rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
(element.text, publicId, systemId))
else:
rv.append("<!DOCTYPE %s>" % (element.text,))
elif element.tag == "DOCUMENT_ROOT":
if element.text is not None:
rv.append(element.text)
if element.tail is not None:
raise TypeError("Document node cannot have tail")
if hasattr(element, "attrib") and len(element.attrib):
raise TypeError("Document node cannot have attributes")
for child in element:
serializeElement(child)
elif element.tag == ElementTreeCommentType:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (filter.fromXmlName(element.tag),))
else:
attr = " ".join(["%s=\"%s\"" % (
filter.fromXmlName(name), value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = Comment
fragmentClass = DocumentFragment
implementation = ElementTreeImplementation
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._element
else:
if self.defaultNamespace is not None:
return self.document._element.find(
"{%s}html" % self.defaultNamespace)
else:
return self.document._element.find("html")
def getFragment(self):
return _base.TreeBuilder.getFragment(self)._element
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
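# A minimal sketch, not part of html5lib itself, showing the wrapper classes
# produced by getETreeBuilder() when backed by the standard library's
# xml.etree.ElementTree; runnable only where this vendored package imports.
if __name__ == "__main__":
    from xml.etree import ElementTree as SimpleETree
    etree_module = getETreeModule(SimpleETree)
    node = etree_module.Element("div")
    node.attributes = {"id": "example"}
    node.insertText("hello")
    print(etree_module.tostring(node._element))
    # -> <div id="example">hello</div>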
|
sbellem/django
|
refs/heads/master
|
django/core/urlresolvers.py
|
59
|
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
from __future__ import unicode_literals
import functools
import re
import warnings
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
# If a URLRegexResolver doesn't have a namespace or app_name, it passes
# in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Return a callable corresponding to lookup_view. This function is used
by both resolve() and reverse(), so can_fail allows the caller to choose
between returning the input as is and raising an exception when the input
string can't be interpreted as an import path.
If lookup_view is already a callable, return it.
If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it.
If lookup_view is some other kind of string and can_fail is True, the string
is returned as is. If can_fail is False, an exception is raised (either
ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist(
"'%s' is not a callable or a dot-notation path" % lookup_view
)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
if can_fail:
return lookup_view
else:
raise ImportError(
"Could not import '%s'. The path must be fully qualified." %
lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
if can_fail:
return lookup_view
else:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name))
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
if can_fail:
return lookup_view
else:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name))
else:
if not callable(view_func):
# For backwards compatibility this is raised regardless of can_fail
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name))
return view_func
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if hasattr(pattern, '_callback_str'):
self._callback_strs.add(pattern._callback_str)
elif hasattr(pattern, '_callback'):
callback = pattern._callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
lookup_str = callback.__module__ + "." + callback.__class__.__name__
else:
lookup_str = callback.__module__ + "." + callback.__name__
self._callback_strs.add(lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = force_text(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = match.groups() + sub_match.args
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, six.string_types):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included urlconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
if not self._populated:
self._populate()
original_lookup = lookup_view
try:
if self._is_callback(lookup_view):
lookup_view = get_callable(lookup_view, True)
except (ImportError, AttributeError) as e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
else:
if not callable(original_lookup) and callable(lookup_view):
warnings.warn(
'Reversing by dotted path is deprecated (%s).' % original_lookup,
RemovedInDjango110Warning, stacklevel=3
)
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, text_args))
else:
if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
set(defaults.keys())):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
# safe characters from `pchar` definition of RFC 3986
url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
# Don't allow construction of scheme relative urls.
if url.startswith('//'):
url = '/%%2F%s' % url[2:]
return url
        # lookup_view can be a URL name, a dotted path, or a callable. Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found. %d pattern(s) tried: %s" %
(lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
    Rather than taking a regex argument, we just override the ``regex``
    property to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
if current_app:
current_path = current_app.split(':')
current_path.reverse()
else:
current_path = None
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
reverse_lazy = lazy(reverse, six.text_type)
def clear_url_caches():
get_callable.cache_clear()
get_resolver.cache_clear()
get_ns_resolver.cache_clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
    Returns the currently active script prefix. Useful for client code that
    wishes to construct its own URLs manually (although accessing the request
    instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def translate_url(url, lang_code):
"""
Given a URL (absolute or relative), try to get its translated version in
the `lang_code` language (either by i18n_patterns or by translated regex).
Return the original URL if no translated version is found.
"""
parsed = urlsplit(url)
try:
match = resolve(parsed.path)
except Resolver404:
pass
else:
to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
with override(lang_code):
try:
url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
except NoReverseMatch:
pass
else:
url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
return url
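# Hedged sketch, not part of Django: the pure helpers in this module can be
# exercised without a configured project; resolve()/reverse() additionally
# need settings.ROOT_URLCONF to point at a real URLconf.
if __name__ == "__main__":
    print(get_mod_func('django.views.news.stories.story_detail'))
    # -> ('django.views.news.stories', 'story_detail')
    print(get_mod_func('story_detail'))
    # -> ('story_detail', '')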
|
hj3938/panda3d
|
refs/heads/master
|
direct/src/showbase/EventManager.py
|
9
|
"""Undocumented Module"""
__all__ = ['EventManager']
from MessengerGlobal import *
from direct.directnotify.DirectNotifyGlobal import *
from direct.task.TaskManagerGlobal import taskMgr
from panda3d.core import PStatCollector, EventQueue, EventHandler
class EventManager:
notify = None
# delayed import, since this is imported by the Toontown Launcher
# before the complete PandaModules have been downloaded.
PStatCollector = None
def __init__(self, eventQueue = None):
"""
Create a C++ event queue and handler
"""
# Make a notify category for this class (unless there already is one)
if (EventManager.notify == None):
EventManager.notify = directNotify.newCategory("EventManager")
self.eventQueue = eventQueue
self.eventHandler = None
self._wantPstats = None # no config at this point
def doEvents(self):
"""
Process all the events on the C++ event queue
"""
if self._wantPstats is None:
self._wantPstats = config.GetBool('pstats-eventmanager', 0)
EventManager.PStatCollector = PStatCollector
# use different methods for handling events with and without pstats tracking
# for efficiency
if self._wantPstats:
processFunc = self.processEventPstats
else:
processFunc = self.processEvent
while (not self.eventQueue.isQueueEmpty()):
processFunc(self.eventQueue.dequeueEvent())
def eventLoopTask(self, task):
"""
Process all the events on the C++ event queue
"""
self.doEvents()
messenger.send("event-loop-done")
return task.cont
def parseEventParameter(self, eventParameter):
"""
Extract the actual data from the eventParameter
"""
if (eventParameter.isInt()):
return eventParameter.getIntValue()
elif (eventParameter.isDouble()):
return eventParameter.getDoubleValue()
elif (eventParameter.isString()):
return eventParameter.getStringValue()
elif (eventParameter.isWstring()):
return eventParameter.getWstringValue()
elif (eventParameter.isTypedRefCount()):
return eventParameter.getTypedRefCountValue()
elif (eventParameter.isEmpty()):
return None
else:
# Must be some user defined type, return the ptr
# which will be downcast to that type.
return eventParameter.getPtr()
def processEvent(self, event):
"""
Process a C++ event
Duplicate any changes in processEventPstats
"""
# **************************************************************
# ******** Duplicate any changes in processEventPstats *********
# **************************************************************
# Get the event name
eventName = event.getName()
if eventName:
paramList = []
for i in range(event.getNumParameters()):
eventParameter = event.getParameter(i)
eventParameterData = self.parseEventParameter(eventParameter)
paramList.append(eventParameterData)
# Do not print the new frame debug, it is too noisy!
if (EventManager.notify.getDebug() and eventName != 'NewFrame'):
EventManager.notify.debug('received C++ event named: ' + eventName +
' parameters: ' + repr(paramList))
# **************************************************************
# ******** Duplicate any changes in processEventPstats *********
# **************************************************************
            # Send the event. We used to send it with the event
            # name as a parameter, but now you can use extraArgs for that.
if paramList:
messenger.send(eventName, paramList)
else:
messenger.send(eventName)
# Also send the event down into C++ land
if self.eventHandler:
self.eventHandler.dispatchEvent(event)
else:
# An unnamed event from C++ is probably a bad thing
EventManager.notify.warning('unnamed event in processEvent')
def processEventPstats(self, event):
"""
Process a C++ event with pstats tracking
Duplicate any changes in processEvent
"""
# ********************************************************
# ******** Duplicate any changes in processEvent *********
# ********************************************************
# Get the event name
eventName = event.getName()
if eventName:
paramList = []
for i in range(event.getNumParameters()):
eventParameter = event.getParameter(i)
eventParameterData = self.parseEventParameter(eventParameter)
paramList.append(eventParameterData)
# Do not print the new frame debug, it is too noisy!
if (EventManager.notify.getDebug() and eventName != 'NewFrame'):
EventManager.notify.debug('received C++ event named: ' + eventName +
' parameters: ' + repr(paramList))
            # Send the event. We used to send it with the event
            # name as a parameter, but now you can use extraArgs for that.
# ********************************************************
# ******** Duplicate any changes in processEvent *********
# ********************************************************
if self._wantPstats:
name = eventName
hyphen = name.find('-')
if hyphen >= 0:
name = name[0:hyphen]
pstatCollector = EventManager.PStatCollector('App:Show code:eventManager:' + name)
pstatCollector.start()
if self.eventHandler:
cppPstatCollector = EventManager.PStatCollector(
'App:Show code:eventManager:' + name + ':C++')
if paramList:
messenger.send(eventName, paramList)
else:
messenger.send(eventName)
# Also send the event down into C++ land
if self.eventHandler:
if self._wantPstats:
cppPstatCollector.start()
self.eventHandler.dispatchEvent(event)
# ********************************************************
# ******** Duplicate any changes in processEvent *********
# ********************************************************
if self._wantPstats:
if self.eventHandler:
cppPstatCollector.stop()
pstatCollector.stop()
else:
# An unnamed event from C++ is probably a bad thing
EventManager.notify.warning('unnamed event in processEvent')
def restart(self):
if self.eventQueue == None:
self.eventQueue = EventQueue.getGlobalEventQueue()
if self.eventHandler == None:
if self.eventQueue == EventQueue.getGlobalEventQueue():
# If we are using the global event queue, then we also
# want to use the global event handler.
self.eventHandler = EventHandler.getGlobalEventHandler()
else:
# Otherwise, we need our own event handler.
self.eventHandler = EventHandler(self.eventQueue)
taskMgr.add(self.eventLoopTask, 'eventManager')
def shutdown(self):
taskMgr.remove('eventManager')
# Flush the event queue. We do this after removing the task
# since the task removal itself might also fire off an event.
if self.eventQueue is not None:
self.eventQueue.clear()
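# Hedged sketch (assumes the panda3d and direct packages are importable; a
# real application normally lets ShowBase own the global event manager): an
# EventManager built around a private queue is driven the same way.
if __name__ == '__main__':
    private_queue = EventQueue()
    mgr = EventManager(private_queue)
    mgr.restart()    # registers the 'eventManager' task with taskMgr
    mgr.shutdown()   # removes the task and clears the private queue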
|
romain-li/edx-platform
|
refs/heads/master
|
common/djangoapps/util/tests/test_milestones_helpers.py
|
16
|
"""
Tests for the milestones helpers library, which is the integration point for the edx_milestones API
"""
import ddt
from mock import patch
from milestones.exceptions import InvalidCourseKeyException, InvalidUserException
from util import milestones_helpers
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': False})
@ddt.ddt
class MilestonesHelpersTestCase(ModuleStoreTestCase):
"""
Main test suite for Milestones API client library
"""
CREATE_USER = False
def setUp(self):
"""
Test case scaffolding
"""
super(MilestonesHelpersTestCase, self).setUp()
self.course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.user = {'id': '123'}
self.milestone = {
'name': 'Test Milestone',
'namespace': 'doesnt.matter',
'description': 'Testing Milestones Helpers Library',
}
@ddt.data(
(False, False, False),
(True, False, False),
(False, True, False),
(True, True, True),
)
def test_pre_requisite_courses_enabled(self, feature_flags):
"""
Tests is_prerequisite_courses_enabled function with a set of possible values for
ENABLE_PREREQUISITE_COURSES and MILESTONES_APP feature flags.
"""
with patch.dict("django.conf.settings.FEATURES", {
'ENABLE_PREREQUISITE_COURSES': feature_flags[0],
'MILESTONES_APP': feature_flags[1]
}):
self.assertEqual(feature_flags[2], milestones_helpers.is_prerequisite_courses_enabled())
def test_add_milestone_returns_none_when_app_disabled(self):
response = milestones_helpers.add_milestone(milestone_data=self.milestone)
self.assertIsNone(response)
def test_get_milestones_returns_none_when_app_disabled(self):
response = milestones_helpers.get_milestones(namespace="whatever")
self.assertEqual(len(response), 0)
def test_get_milestone_relationship_types_returns_none_when_app_disabled(self):
response = milestones_helpers.get_milestone_relationship_types()
self.assertEqual(len(response), 0)
def test_add_course_milestone_returns_none_when_app_disabled(self):
response = milestones_helpers.add_course_milestone(unicode(self.course.id), 'requires', self.milestone)
self.assertIsNone(response)
def test_get_course_milestones_returns_none_when_app_disabled(self):
response = milestones_helpers.get_course_milestones(unicode(self.course.id))
self.assertEqual(len(response), 0)
def test_add_course_content_milestone_returns_none_when_app_disabled(self):
response = milestones_helpers.add_course_content_milestone(
unicode(self.course.id),
'i4x://any/content/id',
'requires',
self.milestone
)
self.assertIsNone(response)
def test_get_course_content_milestones_returns_none_when_app_disabled(self):
response = milestones_helpers.get_course_content_milestones(
unicode(self.course.id),
'i4x://doesnt/matter/for/this/test',
'requires'
)
self.assertEqual(len(response), 0)
def test_remove_content_references_returns_none_when_app_disabled(self):
response = milestones_helpers.remove_content_references("i4x://any/content/id/will/do")
self.assertIsNone(response)
def test_get_namespace_choices_returns_values_when_app_disabled(self):
response = milestones_helpers.get_namespace_choices()
self.assertIn('ENTRANCE_EXAM', response)
def test_get_course_milestones_fulfillment_paths_returns_none_when_app_disabled(self):
response = milestones_helpers.get_course_milestones_fulfillment_paths(unicode(self.course.id), self.user)
self.assertIsNone(response)
def test_add_user_milestone_returns_none_when_app_disabled(self):
response = milestones_helpers.add_user_milestone(self.user, self.milestone)
self.assertIsNone(response)
def test_get_service_returns_none_when_app_disabled(self):
"""MilestonesService is None when app disabled"""
response = milestones_helpers.get_service()
self.assertIsNone(response)
@patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': True})
def test_any_unfulfilled_milestones(self):
"""
Tests any_unfulfilled_milestones for invalid arguments with
the app enabled
"""
with self.assertRaises(InvalidCourseKeyException):
milestones_helpers.any_unfulfilled_milestones(None, self.user)
with self.assertRaises(InvalidUserException):
milestones_helpers.any_unfulfilled_milestones(self.course.id, None)
|
ckprice/bedrock
|
refs/heads/master
|
docs/conf.py
|
10
|
# -*- coding: utf-8 -*-
#
# playdoh documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 4 15:11:09 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mozilla.org'
copyright = u'2011, Mozilla'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'playdohdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'playdoh.tex', u'playdoh Documentation',
u'Mozilla', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mozilla.org', u'mozilla.org Documentation',
[u'Mozilla'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
jdemeyer/sagenb
|
refs/heads/master
|
sagenb/notebook/user_conf.py
|
6
|
# -*- coding: utf-8 -*-
"""nodoctest
"""
import os, copy
import server_conf
from conf import (Configuration, POS, DESC, GROUP, TYPE, CHOICES, T_BOOL,
T_INTEGER, T_CHOICE, T_REAL, T_COLOR, T_STRING, T_LIST)
from sagenb.misc.misc import SAGENB_ROOT, get_languages
from flask.ext.babel import lazy_gettext
defaults = {'max_history_length':1000,
'default_system':'sage',
'autosave_interval':60*60, # 1 hour in seconds
'default_pretty_print': False,
'next_worksheet_id_number': -1, # not yet initialized
'language': 'default'
}
defaults_descriptions = {
'language': {
DESC : lazy_gettext('Language'),
GROUP : lazy_gettext('Appearance'),
TYPE : T_CHOICE,
CHOICES : ['default'] + get_languages(),
},
}
def UserConfiguration_from_basic(basic):
c = UserConfiguration()
c.confs = copy.copy(basic)
return c
class UserConfiguration(Configuration):
def defaults(self):
return defaults
def defaults_descriptions(self):
return defaults_descriptions
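# Hedged sketch, not part of sagenb: reconstructing a user configuration from
# its "basic" dict form; anything the dict does not override falls back to
# the defaults defined above.
if __name__ == '__main__':
    c = UserConfiguration_from_basic({'language': 'en'})
    print(c.defaults()['default_system'])     # -> sage
    print(c.defaults()['autosave_interval'])  # -> 3600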
|
cloudnsru/PyCloudNS
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md')) as f:
long_description = f.read()
setup(
name='PyCloudNS',
version='3.0.0',
author='Vyacheslav Anzhiganov',
    author_email='hello@anzhiganov.com',
    long_description=long_description,
packages=[
'PyCloudNS'
],
install_requires=[
'requests',
],
keywords='public dns service',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'
],
)
|
orionrobots/micropython
|
refs/heads/master
|
tests/basics/parser.py
|
62
|
# parser tests
try:
compile
except NameError:
print("SKIP")
import sys
sys.exit()
# completely empty string
# uPy and CPy differ for this case
#try:
# compile("", "stdin", "single")
#except SyntaxError:
# print("SyntaxError")
try:
compile("", "stdin", "eval")
except SyntaxError:
print("SyntaxError")
compile("", "stdin", "exec")
# empty continued line
try:
compile("\\\n", "stdin", "single")
except SyntaxError:
print("SyntaxError")
try:
compile("\\\n", "stdin", "eval")
except SyntaxError:
print("SyntaxError")
compile("\\\n", "stdin", "exec")
|
krisys/django
|
refs/heads/master
|
django/core/handlers/base.py
|
5
|
from __future__ import unicode_literals
import logging
import sys
import types
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.urls import get_resolver, set_urlconf
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.conditional_content_removal,
]
def __init__(self):
self._request_middleware = None
self._view_middleware = None
self._template_response_middleware = None
self._response_middleware = None
self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed as exc:
if settings.DEBUG:
if six.text_type(exc):
logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
else:
logger.debug('MiddlewareNotUsed: %r', middleware_path)
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if db.settings_dict['ATOMIC_REQUESTS'] and db.alias not in non_atomic_requests:
view = transaction.atomic(using=db.alias)(view)
return view
def get_exception_response(self, request, resolver, status_code, exception):
try:
callback, param_dict = resolver.resolve_error_handler(status_code)
            # Unfortunately, the result of inspect.getargspec is not reliable
            # when the callback is wrapped in decorators (frequent for handlers),
            # so fall back on try/except:
try:
response = callback(request, **dict(param_dict, exception=exception))
except TypeError:
warnings.warn(
"Error handlers should accept an exception parameter. Update "
"your code as this parameter will be required in Django 2.0",
RemovedInDjango20Warning, stacklevel=2
)
response = callback(request, **param_dict)
except Exception:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
        # Set up the default url resolver for this thread. This code is
        # outside the try/except so we don't get a spurious "unbound local
        # variable" exception in the event an exception is raised before
        # resolver is set.
urlconf = settings.ROOT_URLCONF
set_urlconf(urlconf)
resolver = get_resolver(urlconf)
# Use a flag to check if the response was rendered to prevent
# multiple renderings or to force rendering if necessary.
response_is_rendered = False
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, 'urlconf'):
# Reset url resolver with a custom URLconf.
urlconf = request.urlconf
set_urlconf(urlconf)
resolver = get_resolver(urlconf)
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
response = self.process_exception_by_middleware(e, request)
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError(
"The view %s.%s didn't return an HttpResponse object. It "
"returned None instead." % (callback.__module__, view_name)
)
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
# Complain if the template response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_template_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__)
)
try:
response = response.render()
except Exception as e:
response = self.process_exception_by_middleware(e, request)
response_is_rendered = True
except http.Http404 as exc:
logger.warning(
'Not Found: %s', request.path,
extra={'status_code': 404, 'request': request},
)
if settings.DEBUG:
response = debug.technical_404_response(request, exc)
else:
response = self.get_exception_response(request, resolver, 404, exc)
except PermissionDenied as exc:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={'status_code': 403, 'request': request},
)
response = self.get_exception_response(request, resolver, 403, exc)
except MultiPartParserError as exc:
logger.warning(
'Bad request (Unable to parse request body): %s', request.path,
extra={'status_code': 400, 'request': request},
)
response = self.get_exception_response(request, resolver, 400, exc)
except SuspiciousOperation as exc:
# The request logger receives events for any problematic request
# The security logger receives events for all SuspiciousOperations
security_logger = logging.getLogger('django.security.%s' % exc.__class__.__name__)
security_logger.error(
force_text(exc),
extra={'status_code': 400, 'request': request},
)
if settings.DEBUG:
return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
response = self.get_exception_response(request, resolver, 400, exc)
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except Exception: # Handle everything else.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
# Complain if the response middleware returned None (a common error).
if response is None:
raise ValueError(
"%s.process_response didn't return an "
"HttpResponse object. It returned None instead."
% (middleware_method.__self__.__class__.__name__))
response = self.apply_response_fixes(request, response)
except Exception: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
response._closable_objects.append(request)
# If the exception handler returns a TemplateResponse that has not
# been rendered, force it to be rendered.
if not response_is_rendered and callable(getattr(response, 'render', None)):
response = response.render()
return response
def process_exception_by_middleware(self, exception, request):
"""
Pass the exception to the exception middleware. If no middleware
        returns a response for this exception, raise it.
"""
for middleware_method in self._exception_middleware:
response = middleware_method(request, exception)
if response:
return response
raise
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error(
'Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={'status_code': 500, 'request': request},
)
if settings.DEBUG:
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve_error_handler(500)
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
|
AlexYang1949/FuturesMeasure
|
refs/heads/master
|
data/dataSpider.py
|
1
|
#-*-coding:utf-8-*-
import datetime
from bs4 import BeautifulSoup
import re
import urllib
from data.dataProvider import DataProvider
from utils.utils import utils
from database import Database
class DataSpider():
def __init__(self,name):
self.name = name
self.tablename = utils.getShort(self.name)+'_table'
self.dataProvider = DataProvider(name)
def start(self):
c_list = self.spiderList()
for dayPrice in c_list:
if (utils.is_number(dayPrice[1]) and dayPrice[1]!=0):
self.dataProvider.writeData(dataDict=dayPrice)
    # Fetch the price data from every page of the listing
def spiderList(self):
c_list = []
count = self.getPageNum(self.getUrl(self.name))
for i in range(1, count + 1):
C_url = self.getUrl(self.name,i)
            print('Crawling %s, page %d\nurl=%s' % (self.name, i, C_url))
priceList = self.getPriceList(C_url)
for priceDay in priceList:
c_list.append(priceDay)
return c_list
def getHtml(self,url):
page = urllib.urlopen(url)
html = page.read()
return html
def getPageNum(self,url):
html = self.getHtml(url)
soup = BeautifulSoup(html, 'html.parser')
string = soup.find('td',class_='tdr',attrs={'height':'30'})
result = re.findall(".*共(\d+)页.*",string.encode('utf-8'))
return int(result[0])
def getPriceList(self,url):
html = self.getHtml(url)
soup = BeautifulSoup(html, 'html.parser')
priceList = []
for dayPrice in soup.find_all('tr'):
priceModel = [priceModel.string for priceModel in dayPrice.find_all('div')]
if(len(priceModel) != 0):
priceList.append(priceModel)
return priceList
    # Exchange codes: shfe = Shanghai Futures Exchange, dce = Dalian Commodity
    # Exchange, czce = Zhengzhou Commodity Exchange, cffex = China Financial
    # Futures Exchange. Commodity short codes (Chinese name -> code):
    # dce:   豆一 A, 豆二 B, 胶合板 BB, 玉米 C, 纤维板 FB, 铁矿石 I, 焦炭 J,
    #        鸡蛋 JD, 焦煤 JM, 塑料 L, 豆粕 M, PP PP, PVC V, 豆油 Y, 棕榈 P
    # czce:  棉花 CF, 玻璃 FG, 郑醇 MA, 菜油 OI, 早稻 RI, 菜粕 RM, 菜籽 RS,
    #        硅铁 SF, 锰硅 SM, 白糖 SR, PTA TA, 强麦 WH, 动力煤 ZC
    # shfe:  白银 AG, 沪铝 AL, 黄金 AU, 沥青 BU, 沪铜 CU, 燃油 FU, 热扎卷板 HC,
    #        沪镍 NI, 沪铅 PB, 螺纹钢 RB, 橡胶 RU, 沪锡 SN, 线材 WR, 沪锌 ZN
    # cffex: 中证500 IC, 沪深300 IF, 上证50 IH, 10年国债 T, 5年国债 TF
shortDict = {'dce':['a','b','bb','c','fb','i','j','jd','jm','l','m','pp','v','y','p'],
'shfe':['ag','al','au','bu','cu','fu','hc','ni','pb','rb','ru','sn','wr','zn'],
'czce':['cf','fg','ma','oi','ri','rm','rs','sf','sm','sr','ta','wh','zc'],
'cffex':['ic','if','ih','t','tf']}
def getUrl(self,name,index=1):
shortname = utils.getShort(name)
exhouse_name = utils.getExhouseName(name)
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d")
return "http://vip.stock.finance.sina.com.cn/q/view/vFutures_History.php?page=" + str(index) + "&breed="+shortname+"0&start=1990-08-22&end="+time+"&jys="+exhouse_name+"&pz=" + shortname + "&hy="+shortname+"0&type=inner"
def spiderEveryDay(self):
c_list = []
C_url = self.getUrl(self.name)
        print('Crawling %s\nurl=%s' % (self.name, C_url))
priceList = self.getPriceList(C_url)
for priceDay in priceList:
c_list.append(priceDay)
print(c_list)
for dayPrice in c_list:
if (utils.is_number(dayPrice[1]) and dayPrice[1]!=0):
self.dataProvider.writeData(dataDict=dayPrice)
if __name__ == '__main__':
# dceNameArray = ['豆一','豆二','胶合板','玉米','纤维板','铁矿石','焦炭','鸡蛋','焦煤','塑料','豆粕','PP','PVC','豆油','棕榈']
# czceNameArray = ['棉花','玻璃','郑醇','菜油','早稻','菜粕','菜籽','硅铁','锰硅','白糖','PTA','强麦','动力煤']
# shfeNameArray = ['白银','沪铝','黄金','沥青','沪铜','燃油','热扎卷板','沪镍','沪铅','螺纹钢','橡胶','沪锡','线材','沪锌']
# cffexNameArray = ['中证500','沪深300','上证50','10年国债','5年国债']
# array = dceNameArray + czceNameArray + shfeNameArray + cffexNameArray
# for name in array:
# spider = DataSpider(name)
# spider.spiderEveryDay()
# for name in cffexNameArray:
# spider = DataSpider(name)
# spider.start()
spider = DataSpider('螺纹钢')
spider.start()
|
muravjov/ansible-modules-core
|
refs/heads/devel
|
packaging/os/rhn_register.py
|
77
|
#!/usr/bin/python
# (c) James Laska
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_register
short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
description:
- Manage registration to the Red Hat Network.
version_added: "1.2"
author: James Laska
notes:
- In order to register a system, rhnreg_ks requires either a username and password, or an activationkey.
requirements:
- rhnreg_ks
options:
state:
description:
- whether to register (C(present)), or unregister (C(absent)) a system
required: false
choices: [ "present", "absent" ]
default: "present"
username:
description:
- Red Hat Network username
required: False
default: null
password:
description:
- Red Hat Network password
required: False
default: null
server_url:
description:
- Specify an alternative Red Hat Network server URL
required: False
default: Current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date) is the default
activationkey:
description:
- supply an activation key for use with registration
required: False
default: null
channels:
description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
required: false
default: []
'''
EXAMPLES = '''
# Unregister system from RHN.
- rhn_register: state=absent username=joe_user password=somepass
# Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
- rhn_register: state=present username=joe_user password=somepass
# Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
- rhn_register: >
state=present
username=joe_user
password=somepass
server_url=https://xmlrpc.my.satellite/XMLRPC
# Register as user (joe_user) with password (somepass) and enable
# channels (rhel-x86_64-server-6-foo-1) and (rhel-x86_64-server-6-bar-1).
- rhn_register: state=present username=joe_user
password=somepass
channels=rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
'''
import sys
import types
import xmlrpclib
import urlparse
# Attempt to import rhn client tools
sys.path.insert(0, '/usr/share/rhn')
try:
import up2date_client
import up2date_client.config
except ImportError, e:
    # The AnsibleModule object ('module') does not exist yet at import time,
    # so module.fail_json() cannot be used here; emit a JSON failure directly.
    import json
    print json.dumps({"failed": True, "msg": "Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e})
    sys.exit(1)
# INSERT REDHAT SNIPPETS
from ansible.module_utils.redhat import *
# INSERT COMMON SNIPPETS
from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
def __init__(self, username=None, password=None):
RegistrationBase.__init__(self, username, password)
self.config = self.load_config()
def load_config(self):
'''
Read configuration from /etc/sysconfig/rhn/up2date
'''
self.config = up2date_client.config.initUp2dateConfig()
# Add support for specifying a default value w/o having to standup some
# configuration. Yeah, I know this should be subclassed ... but, oh
# well
def get_option_default(self, key, default=''):
# ignore pep8 W601 errors for this line
# setting this to use 'in' does not work in the rhn library
if self.has_key(key):
return self[key]
else:
return default
self.config.get_option = types.MethodType(get_option_default, self.config, up2date_client.config.Config)
return self.config
@property
def hostname(self):
'''
Return the non-xmlrpc RHN hostname. This is a convenience method
used for displaying a more readable RHN hostname.
Returns: str
'''
url = urlparse.urlparse(self.config['serverURL'])
return url[1].replace('xmlrpc.','')
@property
def systemid(self):
systemid = None
xpath_str = "//member[name='system_id']/value/string"
if os.path.isfile(self.config['systemIdPath']):
fd = open(self.config['systemIdPath'], 'r')
xml_data = fd.read()
fd.close()
# Ugh, xml parsing time ...
# First, try parsing with libxml2 ...
if systemid is None:
try:
import libxml2
doc = libxml2.parseDoc(xml_data)
ctxt = doc.xpathNewContext()
systemid = ctxt.xpathEval(xpath_str)[0].content
doc.freeDoc()
ctxt.xpathFreeContext()
except ImportError:
pass
# m-kay, let's try with lxml now ...
if systemid is None:
try:
from lxml import etree
root = etree.fromstring(xml_data)
systemid = root.xpath(xpath_str)[0].text
except ImportError:
pass
# Strip the 'ID-' prefix
if systemid is not None and systemid.startswith('ID-'):
systemid = systemid[3:]
return int(systemid)
@property
def is_registered(self):
'''
Determine whether the current system is registered.
Returns: True|False
'''
return os.path.isfile(self.config['systemIdPath'])
def configure(self, server_url):
'''
Configure system for registration
'''
self.config.set('serverURL', server_url)
self.config.save()
def enable(self):
'''
Prepare the system for RHN registration. This includes ...
* enabling the rhnplugin yum plugin
* disabling the subscription-manager yum plugin
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None):
'''
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
if self.module.params.get('server_url', None):
register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
if enable_eus:
register_cmd += " --use-eus-channel"
if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey
# FIXME - support --profilename
# FIXME - support --systemorgid
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if not hasattr(self, 'server') or self.server is None:
if self.hostname != 'rhn.redhat.com':
url = "https://%s/rpc/api" % self.hostname
else:
url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpclib.Server(url, verbose=0)
self.session = self.server.auth.login(self.username, self.password)
func = getattr(self.server, method)
return func(self.session, *args)
def unregister(self):
'''
Unregister a previously registered system
'''
# Initiate RPC connection
self.api('system.deleteSystems', [self.systemid])
# Remove systemid file
os.unlink(self.config['systemIdPath'])
def subscribe(self, channels=[]):
if len(channels) <= 0:
return
current_channels = self.api('channel.software.listSystemChannels', self.systemid)
new_channels = [item['channel_label'] for item in current_channels]
new_channels.extend(channels)
return self.api('channel.software.setSystemChannels', self.systemid, new_channels)
def _subscribe(self, channels=[]):
'''
Subscribe to requested yum repositories using 'rhn-channel' command
'''
rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)
# Enable requested repoid's
for wanted_channel in channels:
            # Each requested channel is treated as a regexp; if nothing matches it, nothing is added.
            for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end
                if re.search(wanted_channel, available_channel):
rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
def main():
# Read system RHN configuration
rhn = Rhn()
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
username = dict(default=None, required=False),
password = dict(default=None, required=False),
server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
activationkey = dict(default=None, required=False),
enable_eus = dict(default=False, type='bool'),
channels = dict(default=[], type='list'),
)
)
state = module.params['state']
rhn.username = module.params['username']
rhn.password = module.params['password']
rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey']
channels = module.params['channels']
rhn.module = module
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
if not activationkey and not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
# Register system
if rhn.is_registered:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhn.enable()
rhn.register(module.params['enable_eus'] == True, activationkey)
rhn.subscribe(channels)
except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))
module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
if not rhn.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhn.unregister()
except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
main()
|
zifeo/nest-simulator
|
refs/heads/master
|
pynest/examples/brunel_exp_multisynapse_nest.py
|
9
|
# -*- coding: utf-8 -*-
#
# brunel_exp_multisynapse_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Random balanced network (exp synapses, multiple time constants)
---------------------------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in
Brunel N, Dynamics of Sparsely Connected Networks of Excitatory and
Inhibitory Spiking Neurons, Journal of Computational Neuroscience 8,
183–208 (2000).
The example demonstrates the usage of the multisynapse neuron
model. Each spike arriving at the neuron triggers an exponential
PSP. The time constant associated with the PSP is defined in the
receptor type array tau_syn of each neuron. The receptor types of all
connections are uniformly distributed, resulting in uniformly
distributed time constants of the PSPs.
When connecting the network, custom synapse models are used, which
allow querying the number of created synapses. Using spike
detectors the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
'''
'''
Importing all necessary modules for simulation, analysis and plotting.
'''
import nest
import nest.raster_plot
import time
from numpy import exp
nest.ResetKernel()
'''
Assigning the current time to a variable in order to determine the
build time of the network.
'''
startbuild = time.time()
'''
Assigning the simulation parameters to variables.
'''
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
'''
Definition of the parameters crucial for asynchronous irregular firing
of the neurons.
'''
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
'''
Definition of the number of neurons in the network and the number of
neurons recorded from.
'''
order = 2500
NE = 4*order # number of excitatory neurons
NI = 1*order # number of inhibitory neurons
N_neurons = NE+NI # number of neurons in total
N_rec = 50 # record from 50 neurons
'''
Definition of the connectivity parameters.
'''
CE = int(epsilon*NE) # number of excitatory synapses per neuron
CI = int(epsilon*NI) # number of inhibitory synapses per neuron
C_tot = int(CI+CE) # total number of synapses per neuron
'''
Initialization of the parameters of the integrate-and-fire neuron and
the synapses. The parameters of the neuron are stored in a dictionary.
'''
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
J = 0.1 # postsynaptic amplitude in mV
nr_ports = 100 # number of receptor types
tau_syn = [0.1+0.01*i for i in range(nr_ports)] # PSP-time-constant-array available in each neuron ranging from 0.1 ms to 1.09 ms
neuron_params= {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta,
"tau_syn": tau_syn}
J_ex = J # amplitude of excitatory postsynaptic current
J_in = -g*J_ex # amplitude of inhibitory postsynaptic current
'''
Definition of threshold rate, which is the external rate needed to fix
the membrane potential around its threshold, the external firing rate
and the rate of the poisson generator which is multiplied by the
in-degree CE and converted to Hz by multiplication by 1000.
'''
nu_th = theta/(J*CE*tauMem)
nu_ex = eta*nu_th
p_rate = 1000.0*nu_ex*CE
'''
Configuration of the simulation kernel by the previously defined time
resolution used in the simulation. Setting "print_time" to True prints
the already processed simulation time as well as its percentage of the
total simulation time.
'''
nest.SetKernelStatus({"resolution": dt, "print_time": True, "overwrite_files": True})
print("Building network")
'''
Configuration of the model `iaf_psc_exp_multisynapse` and
`poisson_generator` using SetDefaults(). This function expects the
model to be passed as a string and the parameters to be specified
in a dictionary. All instances of these models created after this
point will have the properties specified in the dictionary by default.
'''
nest.SetDefaults("iaf_psc_exp_multisynapse", neuron_params)
nest.SetDefaults("poisson_generator",{"rate": p_rate})
'''
Creation of the nodes using `Create`. We store the returned handles in
variables for later reference. Here we create the excitatory and
inhibitory populations, as well as the poisson generator and two spike
detectors. The spike
detectors will later be used to record excitatory and inhibitory
spikes.
'''
nodes_ex = nest.Create("iaf_psc_exp_multisynapse",NE)
nodes_in = nest.Create("iaf_psc_exp_multisynapse",NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_detector")
ispikes = nest.Create("spike_detector")
'''
Configuration of the spike detectors recording excitatory and
inhibitory spikes using `SetStatus`, which expects a list of node
handles and a list of parameter dictionaries. Setting the variable
"to_file" to True ensures that the spikes will be recorded in a .gdf
file starting with the string assigned to label. Setting "withtime"
and "withgid" to True ensures that each spike is saved to file by
stating the gid of the spiking neuron and the spike time in one line.
'''
nest.SetStatus(espikes,[{"label": "brunel-py-ex",
"withtime": True,
"withgid": True,
"to_file": True}])
nest.SetStatus(ispikes,[{"label": "brunel-py-in",
"withtime": True,
"withgid": True,
"to_file": True}])
print("Connecting devices")
'''
Definition of a synapse using `CopyModel`, which expects the model
name of a pre-defined synapse, the name of the custom synapse and
an optional parameter dictionary. The parameters defined in the
dictionary will be the defaults for the custom
synapse. Here we define one synapse for the excitatory and one for the
inhibitory connections giving the previously defined weights and equal
delays.
'''
nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay})
nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay})
'''
Connecting the previously defined poisson generator to the excitatory
and inhibitory neurons using the excitatory synapse. Since the poisson
generator is connected to all neurons in the population the default
rule ('all_to_all') of Connect() is used. The synaptic properties are
pre-defined in a dictionary and inserted via syn_spec. As synaptic
model the pre-defined synapses "excitatory" and "inhibitory" are
chosen, thus setting weight and delay. The receptor type is drawn
from a distribution for each connection, which is specified in the
synapse properties by assigning a dictionary to the keyword
'receptor_type', which includes the specification of the distribution
and the associated parameter.
'''
syn_params_ex = {"model": "excitatory",
"receptor_type": {"distribution": "uniform_int", "low": 1, "high": nr_ports}
}
syn_params_in = {"model": "inhibitory",
"receptor_type": {"distribution": "uniform_int", "low": 1, "high": nr_ports}
}
nest.Connect(noise, nodes_ex, syn_spec=syn_params_ex)
nest.Connect(noise, nodes_in, syn_spec=syn_params_ex)
'''
Connecting the first N_rec nodes of the excitatory and inhibitory
population to the associated spike detectors using excitatory
synapses. Here the same shortcut for the specification of the synapse
as defined above is used.
'''
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
'''
Connecting the excitatory population to all neurons while distributing
the receptor ports. Here we use the previously defined parameter dictionary
syn_params_ex. Beforehand, the connection parameters are defined in a
dictionary. Here we use the connection rule 'fixed_indegree', which
requires the definition of the indegree.
'''
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, syn_params_ex)
print("Inhibitory connections")
'''
Connecting the inhibitory population to all neurons while distributing
the receptor ports. Here we use the previously defined parameter dictionary
syn_params_in. The connection parameters are defined analogously to the
connection from the excitatory population defined above.
'''
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, syn_params_in)
'''
Storage of the time point after the buildup of the network in a
variable.
'''
endbuild=time.time()
'''
Simulation of the network.
'''
print("Simulating")
nest.Simulate(simtime)
'''
Storage of the time point after the simulation of the network in a
variable.
'''
endsimulate= time.time()
'''
Reading out the total number of spikes received from the spike
detector connected to the excitatory population and the inhibitory
population.
'''
events_ex = nest.GetStatus(espikes,"n_events")[0]
events_in = nest.GetStatus(ispikes,"n_events")[0]
'''
Calculation of the average firing rate of the excitatory and the
inhibitory neurons by dividing the total number of recorded spikes by
the number of neurons recorded from and the simulation time. The
multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz.
'''
rate_ex = events_ex/simtime*1000.0/N_rec
rate_in = events_in/simtime*1000.0/N_rec
'''
Reading out the number of connections established using the excitatory
and inhibitory synapse model. The numbers are summed up resulting in
the total number of synapses.
'''
num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\
nest.GetDefaults("inhibitory")["num_connections"]
'''
Establishing the time it took to build and simulate the network by
taking the difference of the pre-defined time variables.
'''
build_time = endbuild-startbuild
sim_time = endsimulate-endbuild
'''
Printing the network properties, firing rates and building times.
'''
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
'''
Plot a raster of the excitatory neurons and a histogram.
'''
nest.raster_plot.from_device(espikes, hist=True)
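'''
Displaying the figure. nest.raster_plot draws with matplotlib, so when the
script runs non-interactively the window may not open by itself. The explicit
show() call below is an addition to the original example (it assumes
matplotlib's pylab interface is importable) and can be omitted in interactive
sessions.
'''
import pylab
pylab.show()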
|
karmix/blivet
|
refs/heads/master
|
tests/devicelibs_test/mdraid_test.py
|
6
|
import unittest
import blivet.devicelibs.mdraid as mdraid
class MDRaidTestCase(unittest.TestCase):
def testMDRaid(self):
##
## level lookup
##
self.assertEqual(mdraid.RAID_levels.raidLevel("stripe").name, "raid0")
self.assertEqual(mdraid.RAID_levels.raidLevel("mirror").name, "raid1")
self.assertEqual(mdraid.RAID_levels.raidLevel("4").name, "raid4")
self.assertEqual(mdraid.RAID_levels.raidLevel(5).name, "raid5")
self.assertEqual(mdraid.RAID_levels.raidLevel("RAID6").name, "raid6")
self.assertEqual(mdraid.RAID_levels.raidLevel("raid10").name, "raid10")
|
iam-TJ/node-gyp
|
refs/heads/master
|
gyp/test/variables/commands/gyptest-commands-ignore-env.py
|
330
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that environment variables are ignored when --ignore-environment is
specified.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
os.environ['GYP_DEFINES'] = 'FOO=BAR'
os.environ['GYP_GENERATORS'] = 'foo'
os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'
expect = test.read('commands.gyp.ignore-env.stdout').replace('\r\n', '\n')
test.run_gyp('commands.gyp',
'--debug', 'variables',
'--ignore-environment',
stdout=expect, ignore_line_numbers=True)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
|
RobertABT/heightmap
|
refs/heads/master
|
build/matplotlib/lib/mpl_toolkits/mplot3d/__init__.py
|
7
|
from axes3d import Axes3D
|
rlowrance/python-unified-containers
|
refs/heads/master
|
src/puc-old.py
|
1
|
'''Python Uniform Containers that mimic K's container types'''
# TODO: write documentation for V, H, T, KT
# goal: assure that the API is easy to describe
import abc
import collections
import numpy as np
import pandas as pd
import pdb
import unittest
class PUC(object):
__metaclass__ = abc.ABCMeta
pass
class V(PUC):
__metaclass__ = abc.ABCMeta
def __len__(self):
return self.nparray.size
def to_numpy_array(self):
return self.nparray
def _items(self):
s = ''
for i in xrange(self.nparray.size):
s += self.nparray[i].__str__()
if i != self.nparray.size - 1:
s += ', '
return s
def _validate_len_key(self, key):
'continue if len(key) is ok'
if len(key) == self.nparray.size:
return
format = 'len(key) %s != len(data) %s'
msg = format % (len(key), self.nparray.size)
raise IndexError(msg)
def _getitem(self, Cls, key):
'''return Cls instance or a python scalar or raise IndexError
ARGS
key : if plain integer or long integer
then return a python float
if Vint64
then return a Vfloat64 as selected by index values
if Vint1
then return a Vfloat64 as selected by 1 values
'''
        # note: in Q, indexing a missing item results in null, not an error
if isinstance(key, (int, long)):
# note: in Python 3, int and long are the same type
return self.nparray[key]
if isinstance(key, bool):
return self.nparray[int(key)]
if isinstance(key, Vint64):
return Cls(self.nparray[key.nparray])
if isinstance(key, Vbool):
self._validate_len_key(key)
return Cls(self.nparray[key.nparray])
# TODO: extend to allow indexing by Vobj
        raise IndexError('type(key) = ' + str(type(key)))
def _setitem(self, key, value):
'''mutate self and return None'''
if isinstance(key, (int, long)):
self.nparray[key] = value
return
        if isinstance(key, bool):
            self.nparray[int(key)] = value
            return
if isinstance(key, Vint64):
self.nparray[key.nparray] = value
return
if isinstance(key, Vbool):
self._validate_len_key(key)
self.nparray[key.nparray] = value
return
        raise IndexError('type(key) = ' + str(type(key)))
    def _concatenate(self, other):
        'extend self by appending each element of other'
        # Q operator
        if isinstance(other, V):
            # np.append appends to a copy of its first arg
            return np.append(self.nparray, other.nparray)
        pass
    def _find(self, other):
        'return indices of each element of other in self'
        # Q operator
        pass
def _equal(self, other):
'order is significant'
pass
def _identical(self, other):
'have same adddress'
pass
class Vfloat64(V):
'64-bit floating point vector'
def __init__(self, obj):
self.nparray = np.array(
obj,
dtype=np.float64,
copy=True,
order='C',
subok=False,
ndmin=1)
def __str__(self):
return 'Vfloat64(' + super(Vfloat64, self)._items() + ')'
def __repr__(self):
return 'Vfloat64([' + super(Vfloat64, self)._items() + '])'
def __getitem__(self, key):
return self._getitem(Vfloat64, key)
def __setitem__(self, key, value):
self._setitem(key, value)
def exp(self):
'return new Vfloat64'
return Vfloat64(np.exp(self.nparray))
def __radd__(self, other):
'other + V'
return self.__add__(other)
def __add__(self, other):
'return new Vfloat64'
if isinstance(other, (Vfloat64, Vint64, Vbool)):
return Vfloat64(np.add(self.nparray, other.nparray))
if isinstance(other, (int, long, float)):
# int, long, float
return Vfloat64(np.add(self.nparray,
np.full(self.nparray.shape, other)))
raise TypeError('type(other) = ' + str(type(other)))
def concatenate(self, other):
        'append other to self; performs no type conversions'
if isinstance(other, Vfloat64):
# np.append appends to copy of first arg
return Vfloat64(np.append(self.nparray, other.nparray))
if isinstance(other, float):
return Vfloat64(np.append(self.nparray, np.array([other])))
raise TypeError('type(other) = ' + str(type(other)))
class Vint64(V):
'64-bit integer vector'
def __init__(self, obj):
self.nparray = np.array(
obj,
dtype=np.int64,
copy=True,
order='C',
subok=False,
ndmin=1)
def __str__(self):
return 'Vint64(' + super(Vint64, self)._items() + ')'
def __repr__(self):
return 'Vint64([' + super(Vint64, self)._items() + '])'
def __getitem__(self, key):
return self._getitem(Vint64, key)
def __setitem__(self, key, value):
self._setitem(key, value)
def __radd__(self, other):
'other + V'
return self.__add__(other)
def __add__(self, other):
'return new V'
if isinstance(other, Vfloat64):
return Vfloat64(np.add(self.nparray, other.nparray))
if isinstance(other, (Vint64, Vbool)):
return Vint64(np.add(self.nparray, other.nparray))
        if isinstance(other, float):
            # a float scalar is broadcast and promotes the result to float64
            return Vfloat64(np.add(self.nparray,
                            np.full(self.nparray.shape, other)))
        if isinstance(other, (int, long, bool)):
            # an int/long/bool scalar is broadcast; the result stays int64
            return Vint64(np.add(self.nparray,
                          np.full(self.nparray.shape, other)))
        raise TypeError('type(other) = ' + str(type(other)))
def concatenate(self, other):
        'append other to self; performs no type conversions'
if isinstance(other, Vint64):
# np.append appends to copy of first arg
return Vint64(np.append(self.nparray, other.nparray))
if isinstance(other, (int, long, bool)):
return Vint64(np.append(self.nparray, np.array([other])))
raise TypeError('type(other) = ' + str(type(other)))
class Vbool(V): # TODO: Rename Vint8 (if its signed)
'boolean vector'
def __init__(self, obj):
self.nparray = np.array(
obj,
dtype=np.bool, # TODO: make smaller
copy=True,
order='C',
subok=False,
ndmin=1)
def __str__(self):
return 'Vbool(' + super(Vbool, self)._items() + ')'
def __repr__(self):
return 'Vbool([' + super(Vbool, self)._items() + '])'
def __getitem__(self, key):
return self._getitem(Vbool, key)
def __setitem__(self, key, value):
self._setitem(key, value)
def __radd__(self, other):
'return new V: other + vbool'
return self.__add__(other)
def __add__(self, other):
'return new V: Vbool + other'
if isinstance(other, Vfloat64):
return Vfloat64(np.add(self.nparray, other.nparray))
if isinstance(other, Vint64):
return Vint64(np.add(self.nparray, other.nparray))
if isinstance(other, Vbool):
            # note: numpy treats + on bools as logical "or", not as arithmetic addition
return Vint64(np.add(self.nparray.astype(long),
other.nparray.astype(long)))
        if isinstance(other, (int, long, bool)):
            # an int/long/bool scalar is broadcast; the result becomes int64
            return Vint64(np.add(self.nparray,
                          np.full(self.nparray.shape, other)))
        if isinstance(other, float):
            # a float scalar is broadcast; the result becomes float64
            return Vfloat64(np.add(self.nparray,
                            np.full(self.nparray.shape, other)))
        raise TypeError('type(other) = ' + str(type(other)))
def concatenate(self, other):
        'append other to self; performs no type conversions'
if isinstance(other, Vbool):
# np.append appends to copy of first arg
return Vbool(np.append(self.nparray, other.nparray))
if isinstance(other, (bool)):
return Vbool(np.append(self.nparray, np.array([other])))
raise TypeError('type(other) = ' + str(type(other)))
class Vobj(V):
'vector of arbitrary objects'
def __init__(self, obj):
self.nparray = np.array(
obj,
dtype=np.object,
copy=True,
order='C',
subok=False,
ndmin=1)
def __str__(self):
return 'Vobj(' + super(Vobj, self)._items() + ')'
def __repr__(self):
return 'Vobj([' + super(Vobj, self)._items() + '])'
def __getitem__(self, key):
return self._getitem(Vobj, key)
def __setitem__(self, key, value):
self._setitem(key, value)
def __radd__(self, other):
'other + VObj'
# TODO: fix, does not work for string concatenation
return self.__add__(other)
def __add__(self, other):
'Vobj + other'
if isinstance(other, V):
if len(self.nparray) != len(other.nparray):
msg = 'different lengths: %d, %d' % (len(self.nparray), len(other.nparray))
raise TypeError(msg)
result = np.empty(shape=(len(self.nparray)), dtype=object)
for i in xrange(len(self.nparray)):
a = self.nparray[i]
b = other.nparray[i]
try:
r = a + b
except TypeError, m:
msg = 'type(a) %s type(b) %s' % (type(a), type(b))
                        raise TypeError(str(m) + ' ' + msg)
result[i] = r
return Vobj(result)
        raise NotImplementedError('implement other cases')
def concatenate(self, other):
if isinstance(other, V):
return Vobj(np.append(self.nparray, other.nparray))
try:
return Vobj(np.append(self.nparray, other))
except:
raise TypeError('other is type %s' % type(other))
raise TypeError('other is type %s' % type(other))
class D(PUC): # TODO: rename to H for hash; want name not in base Python
'dictionary with [] extended to allow for a sequence'
def __init__(self, key_list, value_list):
'''initialize'''
# Note: in Q, the keys do not need to be unique
        # minimal implementation (an assumption): later keys win on duplicates
        self.d = dict(zip(key_list, value_list))
def keys(self):
'''return list of keys'''
pass
def cols(self):
return self.keys()
def values(self):
'''return list of values'''
def equal(self, other):
'''order of keys is significant; not the identity operator
If other is a V, compare self.values and other
'''
pass
def identical(self, other):
'have the same address'
pass
def find(self, other):
'reverse lookup; always succeeds, possibly returning None'
pass
def concatenate(self, other):
'the mapping in other dominates'
pass
def as_python_dict(self):
'''return Python dict'''
return self.d
def take(self, keys):
'''return new D with the keys and self[keys]'''
pass
    def __delitem__(self, key):
'''delete key and value
in Q, removing a key that does not exist has no effect
'''
pass
def add(self, other):
        '''Perform + on common keys; others merge (as in concatenate)
'''
pass
def __getitem__(self, key):
'''return Python list of same shape as key or scalar or None
ARGS
key : if scalar
then return d[key]
if a sequence (including a V)
then return {v[0], v[1], ... }
In Q, if keys are not of uniform shape, the lookup fails
at the first key of a different shape.
'''
if isinstance(key, Vfloat64):
raise IndexError('attempt to index D by Vfloat64')
if isinstance(key, (Vint64, Vbool)):
result = []
for key_value in np.nditer(key.nparray):
if key_value in self.d:
result.append(self.d[key_value])
else:
result.append(None)
            return result
if isinstance(key, collections.Iterable):
result = []
for key_value in key:
if key_value in self.d:
result.append(self.d[key_value])
else:
result.append(None)
            return result
if key in self.d:
return self.d[key]
else:
return None
    def __setitem__(self, key, value):
'''mutate and return self'''
pass
class T(object):
'a flipped dictionary with string keys and V values, all of same len'
def __init__(self, obj):
assert(type(obj) == D)
self.d = obj
def as_pandas_dataframe(self):
'''return Pandas DataFrame with numpy.array columns'''
        items = [(k, self.d[k].to_numpy_array()) for k in self.d.keys()]
        return pd.DataFrame.from_items(items, orient='columns')
def __getitem__(self, key):
'maybe also return a python scalar'
rows, cols = key
pass # figure out the combinations
def __setitem__(self, key, value):
pass
def ply(self, vars, fun, extra):
'''apply fun to groups defined by vars
Inspired by the plyr package in R
ARGS
vars: a list of strings, each string is a column name in self
fun(var_values, extra): a function returning a T with
the same columns for each invocation
RETURNS
new T with one column for each var plus one result column from each T
returned by the calls to fun
'''
pass
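        # A sketch of the intended call style (hypothetical, since ply() is
        # not implemented yet): group rows by the 'city' column and reduce
        # each group with a caller-supplied function.
        #   t.ply(['city'], lambda var_values, extra: my_group_summary(var_values), None)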
# these methods are inspired by Wickham's dplyr package for R
def filter(self, vars, fun, extra):
'return new T for which fun(var_values, extra) returns true'
pass
def select_rows(self, slice):
        'return new T with rows specified by the python slice'
pass
def select_columns(self, vars):
'return new T with columns as specified by the vars'
pass
def rename(self, old_new_names):
'''return new T selectively updating column names
ARGS
old_new_names = ((old_name,new_name), ...)
'''
pass
def distinct(self):
'''return new T without duplicated rows'''
pass
def mutate(self, vars, fun, extra):
'''like ply, but mutate self by adding the extra columns'''
pass
def summarize(self, fun, extra):
'''return new T formed by passing each row to fun(var_values, extra)'''
pass
    def sample_n(self, n):
'''return new T with n randomly-selected rows'''
pass
class TestVfloat64(unittest.TestCase):
def assert_equal_Vfloat64(self, a, b):
self.assertTrue(isinstance(a, Vfloat64))
self.assertTrue(isinstance(b, Vfloat64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def test_construction_from_list(self):
x = [10, 23]
v = Vfloat64(x)
self.assertTrue(isinstance(v, Vfloat64))
self.assertTrue(isinstance(v, PUC))
self.assertTrue(len(v) == 2)
# TODO: these don't work until __getitem__ is implemented
self.assertTrue(v[0] == 10.0)
self.assertTrue(v[1] == 23.0)
def test_get_item(self):
v = Vfloat64([10.0, 23.0])
# index via Vint64
index1 = Vint64([1, 1, 1])
v_index1 = v[index1]
self.assertTrue(isinstance(v_index1, Vfloat64))
self.assertEqual(len(v_index1), 3)
self.assertEqual(v_index1[0], 23.0)
self.assertEqual(v_index1[1], 23.0)
self.assertEqual(v_index1[2], 23.0)
# index via Python int
index2 = 1
v_index2 = v[index2]
self.assertTrue(isinstance(v_index2, float))
self.assertEqual(v_index2, 23.0)
# index via Vbool
index3 = Vbool([True, False])
v_index3 = v[index3]
self.assertTrue(isinstance(v_index3, Vfloat64))
self.assertEqual(len(v_index3), 1)
self.assertEqual(v_index3[0], 10.0)
# show throw: too few boolean indices
try:
index4 = Vbool([True])
v_index4 = v[index4]
print v_index4
self.assertFalse(True)
except:
self.assertTrue(True)
def test_set_item(self):
# index via Vint64
v = Vfloat64([10.0, 23.0, 47.0])
index1 = Vint64([2, 0])
v[index1] = 99.0
self.assertTrue(isinstance(v, Vfloat64))
self.assertEqual(len(v), 3)
self.assertEqual(v[0], 99.0)
self.assertEqual(v[1], 23.0)
self.assertEqual(v[2], 99.0)
# index via Python int
v = Vfloat64([10.0, 23.0, 47.0])
v[1] = 99.0
self.assertTrue(isinstance(v, Vfloat64))
self.assertEqual(len(v), 3)
self.assertEqual(v[0], 10.0)
self.assertEqual(v[1], 99.0)
self.assertEqual(v[2], 47.0)
# index via Vbool
v = Vfloat64([10.0, 23.0, 47.0])
index3 = Vbool([True, False, True])
v[index3] = 99.0
self.assertTrue(isinstance(v, Vfloat64))
self.assertEqual(len(v), 3)
self.assertEqual(v[0], 99.0)
self.assertEqual(v[1], 23.0)
self.assertEqual(v[2], 99.0)
def test_exp(self):
v = Vfloat64([1, 2, 3])
e = v.exp()
self.assertEqual(len(e), 3)
self.assertAlmostEqual(v[2], 3)
self.assertAlmostEqual(e[2], 20.08, 1)
def test_plus(self):
# plus(float, float)
va = Vfloat64([10, 20])
vb = Vfloat64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 110.0)
self.assertEqual(r[1], 220.0)
# plus(float, int)
vb = Vint64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 110.0)
self.assertEqual(r[1], 220.0)
# plus(float, bool)
vb = Vbool([False, True])
r = va + vb
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 10)
self.assertEqual(r[1], 21)
# test propagation of scalars: float, int, bool
r = va + 1.0
self.assert_equal_Vfloat64(r, Vfloat64([11, 21]))
r = va + 2
self.assert_equal_Vfloat64(r, Vfloat64([12, 22]))
r = va + True
self.assert_equal_Vfloat64(r, Vfloat64([11, 21]))
def test_concatenate(self):
# other is Vfloat64
va = Vfloat64([10, 20])
vb = Vfloat64([100, 200])
r = va.concatenate(vb)
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 4)
self.assertEqual(r[0], 10.0)
self.assertEqual(r[3], 200.0)
# other is float
r = va.concatenate(23.0)
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 10.0)
self.assertEqual(r[2], 23.0)
# other is anything else
for other in (True, 23, Vint64([100, 200]), Vbool([False, True])):
try:
v = va.concatenate(other)
print other, v
self.assertTrue(False) # expected to throw
except TypeError:
self.assertTrue(True)
class TestVint64(unittest.TestCase):
def assert_equal_Vfloat64(self, a, b):
self.assertTrue(isinstance(a, Vfloat64))
self.assertTrue(isinstance(b, Vfloat64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def assert_equal_Vint64(self, a, b):
self.assertTrue(isinstance(a, Vint64))
self.assertTrue(isinstance(b, Vint64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def test_construction_from_list(self):
x = [10, 23]
v = Vint64(x)
self.assertTrue(isinstance(v, Vint64))
self.assertTrue(isinstance(v, PUC))
self.assertTrue(len(v) == 2)
self.assertEqual(v[0], 10)
self.assertEqual(v[1], 23)
def test_plus(self):
# plus(int, float)
va = Vint64([10, 20])
vb = Vfloat64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 110.0)
self.assertEqual(r[1], 220.0)
# plus(int, int)
vb = Vint64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 110)
self.assertEqual(r[1], 220)
# plus(int, bool)
vb = Vbool([False, True])
r = va + vb
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 10)
self.assertEqual(r[1], 21)
# test propagation of scalars: float, int, bool
r = va + 1.0
self.assert_equal_Vfloat64(r, Vfloat64([11, 21]))
r = va + 2
self.assert_equal_Vint64(r, Vint64([12, 22]))
r = va + True
self.assert_equal_Vint64(r, Vint64([11, 21]))
def test_concatenate(self):
# other is Vint64
va = Vint64([10, 20])
vb = Vint64([100, 200])
r = va.concatenate(vb)
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 4)
self.assertEqual(r[0], 10)
self.assertEqual(r[3], 200)
# other is long
r = va.concatenate(23L)
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 10)
self.assertEqual(r[2], 23)
# other is bool
r = va.concatenate(True)
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 10)
self.assertEqual(r[2], 1)
# other is anything else
others = (23.0, Vfloat64([100, 200]), Vbool([False, True]))
for other in others:
try:
v = va.concatenate(other)
print other, v
self.assertTrue(False) # expected to throw
except TypeError:
self.assertTrue(True)
class TestVbool(unittest.TestCase):
def assert_equal_Vfloat64(self, a, b):
self.assertTrue(isinstance(a, Vfloat64))
self.assertTrue(isinstance(b, Vfloat64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def assert_equal_Vint64(self, a, b):
self.assertTrue(isinstance(a, Vint64))
self.assertTrue(isinstance(b, Vint64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def test_construction_from_list(self):
x = [False, True]
v = Vbool(x)
self.assertTrue(isinstance(v, Vbool))
self.assertTrue(isinstance(v, PUC))
self.assertTrue(len(v) == 2)
self.assertEqual(v[0], False)
self.assertEqual(v[1], True)
def test_plus(self):
# plus(bool, float)
va = Vbool([False, True])
vb = Vfloat64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vfloat64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 100)
self.assertEqual(r[1], 201)
# plus(bool, int)
vb = Vint64([100, 200])
r = va + vb
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 100)
self.assertEqual(r[1], 201)
# plus(bool, bool)
vb = Vbool([False, True])
r = va + vb # numpy treats this as or, not +
self.assertTrue(isinstance(r, Vint64))
self.assertEqual(len(r), 2)
self.assertEqual(r[0], 0)
self.assertEqual(r[1], 2)
# test propagation of scalars: float, int, bool
r = va + 1.0
self.assert_equal_Vfloat64(r, Vfloat64([1, 2]))
r = va + 2
self.assert_equal_Vint64(r, Vint64([2, 3]))
r = va + True
self.assert_equal_Vint64(r, Vint64([1, 2]))
def test_concatenate(self):
# other is Vbool
va = Vbool([False, True])
vb = Vbool([True, False])
r = va.concatenate(vb)
self.assertTrue(isinstance(r, Vbool))
self.assertEqual(len(r), 4)
self.assertEqual(r[0], 0)
self.assertEqual(r[3], 0)
# other is bool
r = va.concatenate(True)
self.assertTrue(isinstance(r, Vbool))
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 0)
self.assertEqual(r[2], 1)
# other is anything else
others = (20, 23.0, Vfloat64([100, 200]), Vint64([20]))
for other in others:
try:
v = va.concatenate(other)
print other, v
self.assertTrue(False) # expected to throw
except TypeError:
self.assertTrue(True)
class TestVobj(unittest.TestCase):
def assert_equal_Vfloat64(self, a, b):
self.assertTrue(isinstance(a, Vfloat64))
self.assertTrue(isinstance(b, Vfloat64))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertAlmostEqual(a[i], b[i])
def assert_equal_Vobj(self, a, b):
self.assertTrue(isinstance(a, Vobj))
self.assertTrue(isinstance(b, Vobj))
self.assertEqual(len(a), len(b))
for i in xrange(len(a)):
self.assertEqual(a[i], b[i])
def test_construction_from_list(self):
f64 = Vfloat64([10, 20])
x = [True, 23.0, f64, 'abc']
v = Vobj(x)
self.assertTrue(isinstance(v, Vobj))
self.assertTrue(isinstance(v, V))
self.assertEqual(len(v), 4)
self.assertEqual(v[0], True)
self.assertEqual(v[1], 23.0)
self.assert_equal_Vfloat64(v[2], f64)
self.assertEqual(v[3], 'abc')
def test_add(self):
# Vobj + Vobj
va = Vobj([10, Vfloat64([100, 200]), 'abc'])
vb = Vobj([20, 1, 'def'])
r = va + vb
self.assertTrue(isinstance(r, Vobj))
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 30.0)
self.assert_equal_Vfloat64(r[1], Vfloat64([101, 201]))
self.assertEqual(r[2], 'abcdef')
r2 = vb + va
self.assertEqual(r[0], r2[0])
self.assert_equal_Vfloat64(r[1], r2[1])
self.assertNotEqual(r[2], r2[2])
def test_concatenate(self):
va = Vobj(['a', 10])
vb = Vobj([True, 23.0])
r = va.concatenate(vb)
self.assertEqual(len(r), 4)
self.assertEqual(r[0], 'a')
self.assertEqual(r[3], 23.0)
r = va.concatenate('abc')
self.assertEqual(len(r), 3)
self.assertEqual(r[0], 'a')
self.assertEqual(r[2], 'abc')
class TestD(unittest.TestCase):
def test_construction_from_two_lists(self):
self.assertTrue(False) # write me
if __name__ == '__main__':
if False:
# avoid warnings from pyflakes
pdb.set_trace()
unittest.main()
|
albertz/music-player
|
refs/heads/master
|
mac/pyobjc-core/PyObjCTest/test_bundleFunctions.py
|
2
|
from __future__ import absolute_import, unicode_literals
import objc
from . import fnd as Foundation
from PyObjCTools.TestSupport import *
import os
NSObject = objc.lookUpClass('NSObject')
def S(*args):
return b''.join(args)
FUNCTIONS=[
( 'NSHomeDirectory', S(objc._C_ID)),
( 'NSIsFreedObject', S(objc._C_NSBOOL, objc._C_ID) ),
( 'NSCountFrames', S(objc._C_UINT) ),
( 'NSClassFromString', S(objc._C_CLASS, objc._C_ID) ),
]
class TestBundleFunctions (TestCase):
def setUp(self):
self.bundle = Foundation.NSBundle.bundleForClass_(Foundation.NSBundle)
def testSimple(self):
for bundle in (None, self.bundle):
d = {}
objc.loadBundleFunctions(self.bundle, d, FUNCTIONS)
self.assertIn('NSIsFreedObject', d)
self.assertIn('NSCountFrames', d)
self.assertIn('NSHomeDirectory', d)
# Don't use this API, it is unsupported and causes warnings.
#fn = d[u'NSIsFreedObject']
#obj = NSObject.alloc().init()
#value = fn(obj)
#self.assertTrue(not value)
fn = d['NSHomeDirectory']
value = fn()
self.assertEqual(value, os.path.expanduser('~'))
fn = d['NSClassFromString']
value = fn('NSObject')
self.assertIs(value, NSObject)
# Need to look for a different example, NSCountFrames crashes
# (that is the actual function, not the dynamic wrapper)
#fn = d[u'NSCountFrames']
#import Foundation
#fn = Foundation.NSCountFrames
#value = fn()
#self.assertIsInstance(value, int)
if __name__ == "__main__":
main()
|
UnicronNL/vyos-kernel-clearfog
|
refs/heads/current
|
scripts/gdb/linux/lists.py
|
630
|
#
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <thiebaud@weksteen.fr>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
nb = 0
if (head.type == list_head.get_type().pointer()):
head = head.dereference()
elif (head.type != list_head.get_type()):
raise gdb.GdbError('argument must be of type (struct list_head [*])')
c = head
try:
gdb.write("Starting with: {}\n".format(c))
except gdb.MemoryError:
gdb.write('head is not accessible\n')
return
while True:
p = c['prev'].dereference()
n = c['next'].dereference()
try:
if p['next'] != c.address:
gdb.write('prev.next != current: '
'current@{current_addr}={current} '
'prev@{p_addr}={p}\n'.format(
current_addr=c.address,
current=c,
p_addr=p.address,
p=p,
))
return
except gdb.MemoryError:
gdb.write('prev is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
try:
if n['prev'] != c.address:
gdb.write('next.prev != current: '
'current@{current_addr}={current} '
'next@{n_addr}={n}\n'.format(
current_addr=c.address,
current=c,
n_addr=n.address,
n=n,
))
return
except gdb.MemoryError:
gdb.write('next is not accessible: '
'current@{current_addr}={current}\n'.format(
current_addr=c.address,
current=c
))
return
c = n
nb += 1
if c == head:
gdb.write("list is consistent: {} node(s)\n".format(nb))
return
class LxListChk(gdb.Command):
"""Verify a list consistency"""
def __init__(self):
super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
raise gdb.GdbError("lx-list-check takes one argument")
list_check(gdb.parse_and_eval(argv[0]))
LxListChk()
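# Usage sketch (not part of the original script; the node count shown is
# illustrative). Once the kernel gdb helpers are loaded, the command takes a
# single argument, the address of a struct list_head, e.g.:
#
#   (gdb) lx-list-check &modules
#   list is consistent: 12 node(s)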
|
jamesblunt/sympy
|
refs/heads/master
|
sympy/printing/python.py
|
118
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
"""A printer which converts an expression into its Python interpretation."""
def __init__(self, settings=None):
ReprPrinter.__init__(self)
StrPrinter.__init__(self, settings)
self.symbols = []
self.functions = []
# Create print methods for classes that should use StrPrinter instead
# of ReprPrinter.
for name in STRPRINT:
f_name = "_print_%s" % name
f = getattr(StrPrinter, f_name)
setattr(PythonPrinter, f_name, f)
def _print_Function(self, expr):
func = expr.func.__name__
        if not hasattr(sympy, func) and func not in self.functions:
self.functions.append(func)
return StrPrinter._print_Function(self, expr)
    # procedure (!) for defining symbols which have to be defined in print_python()
def _print_Symbol(self, expr):
symbol = self._str(expr)
if symbol not in self.symbols:
self.symbols.append(symbol)
return StrPrinter._print_Symbol(self, expr)
def _print_module(self, expr):
raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
"""Return Python interpretation of passed expression
(can be passed to the exec() function without any modifications)"""
printer = PythonPrinter(settings)
exprp = printer.doprint(expr)
result = ''
# Returning found symbols and functions
renamings = {}
for symbolname in printer.symbols:
newsymbolname = symbolname
# Escape symbol names that are reserved python keywords
if kw.iskeyword(newsymbolname):
while True:
newsymbolname += "_"
if (newsymbolname not in printer.symbols and
newsymbolname not in printer.functions):
renamings[sympy.Symbol(
symbolname)] = sympy.Symbol(newsymbolname)
break
result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'
for functionname in printer.functions:
newfunctionname = functionname
# Escape function names that are reserved python keywords
if kw.iskeyword(newfunctionname):
while True:
newfunctionname += "_"
if (newfunctionname not in printer.symbols and
newfunctionname not in printer.functions):
renamings[sympy.Function(
functionname)] = sympy.Function(newfunctionname)
break
result += newfunctionname + ' = Function(\'' + functionname + '\')\n'
if not len(renamings) == 0:
exprp = expr.subs(renamings)
result += 'e = ' + printer._str(exprp)
return result
def print_python(expr, **settings):
"""Print output of python() function"""
print(python(expr, **settings))
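# Minimal usage sketch (illustration only, not part of the original module; the
# exact term ordering in the printed expression may differ):
#
#     >>> from sympy import Symbol, Function
#     >>> from sympy.printing.python import python
#     >>> x = Symbol('x')
#     >>> f = Function('f')
#     >>> print(python(f(x) + x))
#     x = Symbol('x')
#     f = Function('f')
#     e = x + f(x)
#
# The returned string is intended to be exec()-able (with Symbol and Function
# available in the executing namespace), after which `e` holds the rebuilt
# expression.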
|
powerlim2/project_free_insight
|
refs/heads/master
|
data_api/venv/lib/python2.7/site-packages/pip/compat/ordereddict.py
|
141
|
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# flake8: noqa
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
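# Minimal usage sketch (illustration only, not part of the original backport):
#
#     >>> d = OrderedDict([('a', 1), ('b', 2)])
#     >>> d['c'] = 3
#     >>> d.keys()          # insertion order is preserved
#     ['a', 'b', 'c']
#     >>> d.popitem()       # LIFO by default
#     ('c', 3)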
|
pignacio/python-nvd3
|
refs/heads/develop
|
examples/LineChart.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3.lineChart import LineChart
#Open File for test
output_file = open('test_lineChart.html', 'w')
#---------------------------------------
chart = LineChart(name="lineChart", x_is_date=False, x_axis_format="AM_PM")
xdata = []
ydata = [0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 4, 3, 3, 5, 7, 5, 3, 16, 6, 9, 15, 4, 12]
ydata2 = [9, 8, 11, 8, 3, 7, 10, 8, 6, 6, 9, 6, 5, 4, 3, 10, 0, 6, 3, 1, 0, 0, 0, 1]
for i in range(0, 24):
xdata.append(i)
kwargs1 = {'color': 'black'}
kwargs2 = {'color': 'red'}
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}}
chart.add_serie(y=ydata, x=xdata, name='sine', extra=extra_serie, **kwargs1)
extra_serie = {"tooltip": {"y_start": "", "y_end": " min"}}
chart.add_serie(y=ydata2, x=xdata, name='cose', extra=extra_serie, **kwargs2)
chart.buildhtml()
output_file.write(chart.htmlcontent)
#close Html file
output_file.close()
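# Running this script writes test_lineChart.html to the current directory; open
# it in a browser to render the chart (the page pulls in whatever d3/nvd3 assets
# the generated HTML references).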
|
fibbo/DIRAC
|
refs/heads/integration
|
StorageManagementSystem/Agent/RequestPreparationAgent.py
|
7
|
# $HeadURL$
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
AGENT_NAME = 'StorageManagement/RequestPreparationAgent'
class RequestPreparationAgent( AgentModule ):
def initialize( self ):
self.fileCatalog = FileCatalog()
self.dm = DataManager()
self.stagerClient = StorageManagerClient()
self.dataIntegrityClient = DataIntegrityClient()
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
return S_OK()
def execute( self ):
res = self.prepareNewReplicas()
return res
def prepareNewReplicas( self ):
""" This is the first logical task to be executed and manages the New->Waiting transition of the Replicas
"""
res = self.__getNewReplicas()
if not res['OK']:
gLogger.fatal( "RequestPreparation.prepareNewReplicas: Failed to get replicas from StagerDB.", res['Message'] )
return res
if not res['Value']:
gLogger.info( "There were no New replicas found" )
return res
replicas = res['Value']['Replicas']
replicaIDs = res['Value']['ReplicaIDs']
gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained %s New replicas for preparation." % len( replicaIDs ) )
# Check that the files exist in the FileCatalog
res = self.__getExistingFiles( replicas.keys() )
if not res['OK']:
return res
exist = res['Value']['Exist']
terminal = res['Value']['Missing']
failed = res['Value']['Failed']
if not exist:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed to determine existence of any files' )
return S_OK()
terminalReplicaIDs = {}
for lfn, reason in terminal.items():
for _se, replicaID in replicas[lfn].items():
terminalReplicaIDs[replicaID] = reason
replicas.pop( lfn )
gLogger.info( "RequestPreparation.prepareNewReplicas: %s files exist in the FileCatalog." % len( exist ) )
if terminal:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s files do not exist in the FileCatalog." % len( terminal ) )
# Obtain the file sizes from the FileCatalog
res = self.__getFileSize( exist )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
terminal = res['Value']['ZeroSize']
fileSizes = res['Value']['FileSizes']
if not fileSizes:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed to determine sizes of any files' )
return S_OK()
for lfn, reason in terminal.items():
for _se, replicaID in replicas[lfn].items():
terminalReplicaIDs[replicaID] = reason
replicas.pop( lfn )
gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained %s file sizes from the FileCatalog." % len( fileSizes ) )
if terminal:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s files registered with zero size in the FileCatalog." % len( terminal ) )
# Obtain the replicas from the FileCatalog
res = self.__getFileReplicas( fileSizes.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
terminal = res['Value']['ZeroReplicas']
fileReplicas = res['Value']['Replicas']
if not fileReplicas:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed to determine replicas for any files' )
return S_OK()
for lfn, reason in terminal.items():
for _se, replicaID in replicas[lfn].items():
terminalReplicaIDs[replicaID] = reason
replicas.pop( lfn )
gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained replica information for %s file from the FileCatalog." % len( fileReplicas ) )
if terminal:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s files registered with zero replicas in the FileCatalog." % len( terminal ) )
# Check the replicas exist at the requested site
replicaMetadata = []
for lfn, requestedSEs in replicas.items():
lfnReplicas = fileReplicas[lfn]
for requestedSE, replicaID in requestedSEs.items():
if not requestedSE in lfnReplicas.keys():
terminalReplicaIDs[replicaID] = "LFN not registered at requested SE"
replicas[lfn].pop( requestedSE )
else:
replicaMetadata.append( ( replicaID, lfnReplicas[requestedSE], fileSizes[lfn] ) )
# Update the states of the files in the database
if terminalReplicaIDs:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
# res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "RequestPreparation.prepareNewReplicas: Failed to update replica failures.", res['Message'] )
if replicaMetadata:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s replica metadata to be updated." % len( replicaMetadata ) )
# Sets the Status='Waiting' of CacheReplicas records that are OK with catalogue checks
res = self.stagerClient.updateReplicaInformation( replicaMetadata )
if not res['OK']:
gLogger.error( "RequestPreparation.prepareNewReplicas: Failed to update replica metadata.", res['Message'] )
return S_OK()
def __getNewReplicas( self ):
""" This obtains the New replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the New replicas from the CacheReplicas table
res = self.stagerClient.getCacheReplicas( {'Status':'New'} )
if not res['OK']:
gLogger.error( "RequestPreparation.__getNewReplicas: Failed to get replicas with New status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "RequestPreparation.__getNewReplicas: No New replicas found to process." )
return S_OK()
else:
gLogger.debug( "RequestPreparation.__getNewReplicas: Obtained %s New replicas(s) to process." % len( res['Value'] ) )
replicas = {}
replicaIDs = {}
for replicaID, info in res['Value'].items():
lfn = info['LFN']
storageElement = info['SE']
if not replicas.has_key( lfn ):
replicas[lfn] = {}
replicas[lfn][storageElement] = replicaID
replicaIDs[replicaID] = ( lfn, storageElement )
return S_OK( {'Replicas':replicas, 'ReplicaIDs':replicaIDs} )
def __getExistingFiles( self, lfns ):
""" This checks that the files exist in the FileCatalog. """
filesExist = []
missing = {}
res = self.fileCatalog.exists( lfns )
if not res['OK']:
gLogger.error( "RequestPreparation.__getExistingFiles: Failed to determine whether files exist.", res['Message'] )
return res
failed = res['Value']['Failed']
for lfn, exists in res['Value']['Successful'].items():
if exists:
filesExist.append( lfn )
else:
missing[lfn] = 'LFN not registered in the FileCatalog'
if missing:
for lfn, reason in missing.items():
gLogger.warn( "RequestPreparation.__getExistingFiles: %s" % reason, lfn )
self.__reportProblematicFiles( missing.keys(), 'LFN-LFC-DoesntExist' )
return S_OK( {'Exist':filesExist, 'Missing':missing, 'Failed':failed} )
def __getFileSize( self, lfns ):
""" This obtains the file size from the FileCatalog. """
fileSizes = {}
zeroSize = {}
res = self.fileCatalog.getFileSize( lfns )
if not res['OK']:
gLogger.error( "RequestPreparation.__getFileSize: Failed to get sizes for files.", res['Message'] )
return res
failed = res['Value']['Failed']
for lfn, size in res['Value']['Successful'].items():
if size == 0:
zeroSize[lfn] = "LFN registered with zero size in the FileCatalog"
else:
fileSizes[lfn] = size
if zeroSize:
for lfn, reason in zeroSize.items():
gLogger.warn( "RequestPreparation.__getFileSize: %s" % reason, lfn )
self.__reportProblematicFiles( zeroSize.keys(), 'LFN-LFC-ZeroSize' )
return S_OK( {'FileSizes':fileSizes, 'ZeroSize':zeroSize, 'Failed':failed} )
def __getFileReplicas( self, lfns ):
""" This obtains the replicas from the FileCatalog. """
replicas = {}
noReplicas = {}
res = self.dm.getActiveReplicas( lfns )
if not res['OK']:
gLogger.error( "RequestPreparation.__getFileReplicas: Failed to obtain file replicas.", res['Message'] )
return res
failed = res['Value']['Failed']
for lfn, lfnReplicas in res['Value']['Successful'].items():
if len( lfnReplicas.keys() ) == 0:
noReplicas[lfn] = "LFN registered with zero replicas in the FileCatalog"
else:
replicas[lfn] = lfnReplicas
if noReplicas:
for lfn, reason in noReplicas.items():
gLogger.warn( "RequestPreparation.__getFileReplicas: %s" % reason, lfn )
self.__reportProblematicFiles( noReplicas.keys(), 'LFN-LFC-NoReplicas' )
return S_OK( {'Replicas':replicas, 'ZeroReplicas':noReplicas, 'Failed':failed} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()
#res = self.dataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'RequestPreparationAgent' )
#if not res['OK']:
# gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
# return res
#if res['Value']['Successful']:
# gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
#if res['Value']['Failed']:
# gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
#return res
|
bodi000/odoo
|
refs/heads/master
|
addons/project_issue/project_issue.py
|
33
|
#-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv, orm
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class project_issue_version(osv.Model):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', size=32, required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
class project_issue(osv.Model):
_name = "project.issue"
_description = "Project Issue"
_order = "priority, create_date desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'project_issue.mt_issue_new': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence <= 1,
'project_issue.mt_issue_stage': lambda self, cr, uid, obj, ctx=None: obj.stage_id and obj.stage_id.sequence > 1,
},
'user_id': {
'project_issue.mt_issue_assigned': lambda self, cr, uid, obj, ctx=None: obj.user_id and obj.user_id.id,
},
'kanban_state': {
'project_issue.mt_issue_blocked': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'blocked',
'project_issue.mt_issue_ready': lambda self, cr, uid, obj, ctx=None: obj.kanban_state == 'done',
},
}
def _get_default_partner(self, cr, uid, context=None):
project_id = self._get_default_project_id(cr, uid, context)
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_project_id(self, cr, uid, context=None):
""" Gives default project by checking if present in the context """
return self._resolve_project_id_from_context(cr, uid, context=context)
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
project_id = self._get_default_project_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], project_id, [('fold', '=', False)], context=context)
def _resolve_project_id_from_context(self, cr, uid, context=None):
""" Returns ID of project based on the value of 'default_project_id'
context key, or None if it cannot be resolved to a single
project.
"""
if context is None:
context = {}
if type(context.get('default_project_id')) in (int, long):
return context.get('default_project_id')
if isinstance(context.get('default_project_id'), basestring):
project_name = context['default_project_id']
project_ids = self.pool.get('project.project').name_search(cr, uid, name=project_name, context=context)
if len(project_ids) == 1:
return int(project_ids[0][0])
return None
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('project_ids', 'in', project_id), ('fold', '=', False) if project_id: add project columns that are not folded
search_domain = []
project_id = self._resolve_project_id_from_context(cr, uid, context=context)
if project_id:
search_domain += ['|', ('project_ids', '=', project_id)]
search_domain += [('id', 'in', ids)]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x,y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Openday’s IDs
@return: difference between current date and log date
@param context: A standard dictionary for contextual values
"""
cal_obj = self.pool.get('resource.calendar')
res_obj = self.pool.get('resource.resource')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
# if the working hours on the project are not defined, use default ones (8 -> 12 and 13 -> 17 * 5), represented by None
if not issue.project_id or not issue.project_id.resource_calendar_id:
working_hours = None
else:
working_hours = issue.project_id.resource_calendar_id.id
res[issue.id] = {}
for field in fields:
duration = 0
ans = False
hours = 0
date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
if field in ['working_hours_open','day_open']:
if issue.date_open:
date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
date_until = issue.date_open
#Calculating no. of working hours to open the issue
hours = cal_obj._interval_hours_get(cr, uid, working_hours,
date_create,
date_open,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False,
context=context)
elif field in ['working_hours_close','day_close']:
if issue.date_closed:
date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
date_until = issue.date_closed
ans = date_close - date_create
#Calculating no. of working hours to close the issue
hours = cal_obj._interval_hours_get(cr, uid, working_hours,
date_create,
date_close,
timezone_from_uid=issue.user_id.id or uid,
exclude_leaves=False,
context=context)
elif field in ['days_since_creation']:
if issue.create_date:
days_since_creation = datetime.today() - datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
res[issue.id][field] = days_since_creation.days
continue
elif field in ['inactivity_days']:
res[issue.id][field] = 0
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, '%Y-%m-%d %H:%M:%S')
res[issue.id][field] = inactive_days.days
continue
if ans:
resource_id = False
if issue.user_id:
resource_ids = res_obj.search(cr, uid, [('user_id','=',issue.user_id.id)])
if resource_ids and len(resource_ids):
resource_id = resource_ids[0]
duration = float(ans.days) + float(ans.seconds)/(24*3600)
if field in ['working_hours_open','working_hours_close']:
res[issue.id][field] = hours
elif field in ['day_open','day_close']:
res[issue.id][field] = duration
return res
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
def on_change_project(self, cr, uid, ids, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', size=128, required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True,select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Contact', select=1),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Private Note'),
'kanban_state': fields.selection([('normal', 'Normal'),('blocked', 'Blocked'),('done', 'Ready for next stage')], 'Kanban State',
track_visibility='onchange',
help="A Issue's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this issue\n"
" * Ready for next stage indicates the issue is ready to be pulled to the next stage",
required=False),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Opened', readonly=True,select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True,select=True),
'date': fields.datetime('Date'),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel."),
'categ_ids': fields.many2many('project.category', string='Tags'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'stage_id': fields.many2one ('project.task.type', 'Stage',
track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]"),
'project_id': fields.many2one('project.project', 'Project', track_visibility='onchange', select=True),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='compute_day', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='compute_day', type="float", store=True),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1, track_visibility='onchange'),
'working_hours_open': fields.function(_compute_day, string='Working Hours to Open the Issue', \
multi='compute_day', type="float", store=True),
'working_hours_close': fields.function(_compute_day, string='Working Hours to Close the Issue', \
multi='compute_day', type="float", store=True),
'inactivity_days': fields.function(_compute_day, string='Days since last action', \
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['progress'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
_defaults = {
'active': 1,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '1',
'kanban_state': 'normal',
'date_last_stage_update': fields.datetime.now,
'user_id': lambda obj, cr, uid, context: uid,
}
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, id, ['name'], context=context)
if not default:
default = {}
default = default.copy()
default.update(name=_('%s (copy)') % (issue['name']))
return super(project_issue, self).copy(cr, uid, id, default=default,
context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(project_issue, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_start
if vals.get('user_id'):
vals['date_start'] = fields.datetime.now()
return super(project_issue, self).write(cr, uid, ids, vals, context)
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
if not task_id:
return {'value': {}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value': {'user_id': task.user_id.id, }}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
""" This function returns value of partner email address based on partner
            :param partner_id: Partner's id
"""
result = {}
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context)
result['email_from'] = partner.email
return {'value': result}
def get_empty_list_help(self, cr, uid, help, context=None):
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_document_name'] = _("issues")
return super(project_issue, self).get_empty_list_help(cr, uid, help, context=context)
# -------------------------------------------------------
# Stage management
# -------------------------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the issue:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * (len(section_ids)-1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_escalate(self, cr, uid, ids, context=None): # FIXME rename this method to issue_escalate
for issue in self.browse(cr, uid, ids, context=context):
data = {}
esc_proj = issue.project_id.project_escalation_id
if not esc_proj:
raise osv.except_osv(_('Warning!'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
data['project_id'] = esc_proj.id
if esc_proj.user_id:
data['user_id'] = esc_proj.user_id.id
issue.write(data)
if issue.task_id:
issue.task_id.write({'project_id': esc_proj.id, 'user_id': False})
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
return [issue.project_id.message_get_reply_to()[0] if issue.project_id else False
for issue in self.browse(cr, uid, ids, context=context)]
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(project_issue, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for issue in self.browse(cr, uid, ids, context=context):
if issue.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, issue, partner=issue.partner_id, reason=_('Customer'))
elif issue.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, issue, email=issue.email_from, reason=_('Customer Email'))
except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
if context is None:
context = {}
context['state_to'] = 'draft'
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
defaults.update(custom_values)
res_id = super(project_issue, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
return res_id
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, content_subtype='html', **kwargs):
""" Overrides mail_thread message_post so that we can set the date of last action field when
a new message is posted on the issue.
"""
if context is None:
context = {}
res = super(project_issue, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
if thread_id and subtype:
self.write(cr, SUPERUSER_ID, thread_id, {'date_action_last': fields.datetime.now()}, context=context)
return res
class project(osv.Model):
_inherit = "project.project"
def _get_alias_models(self, cr, uid, context=None):
return [('project.task', "Tasks"), ("project.issue", "Issues")]
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
project_id: Issue.search_count(cr,uid, [('project_id', '=', project_id), ('stage_id.fold', '=', False)], context=context)
for project_id in ids
}
_columns = {
'project_escalation_id': fields.many2one('project.project', 'Project Escalation',
help='If any issue is escalated from the current Project, it will be listed under the project selected here.',
states={'close': [('readonly', True)], 'cancelled': [('readonly', True)]}),
'issue_count': fields.function(_issue_count, type='integer', string="Issues",),
'issue_ids': fields.one2many('project.issue', 'project_id',
domain=[('stage_id.fold', '=', False)])
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
class account_analytic_account(osv.Model):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
_columns = {
'use_issues': fields.boolean('Issues', help="Check this field if this project manages issues"),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_issues'] = template.use_issues
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
if context is None:
context = {}
res = super(account_analytic_account, self)._trigger_project_creation(cr, uid, vals, context=context)
return res or (vals.get('use_issues') and not 'project_creation_in_progress' in context)
class project_project(osv.Model):
_inherit = 'project.project'
_defaults = {
'use_issues': True
}
def _check_create_write_values(self, cr, uid, vals, context=None):
""" Perform some check on values given to create or write. """
# Handle use_tasks / use_issues: if only one is checked, alias should take the same model
if vals.get('use_tasks') and not vals.get('use_issues'):
vals['alias_model'] = 'project.task'
elif vals.get('use_issues') and not vals.get('use_tasks'):
vals['alias_model'] = 'project.issue'
def on_change_use_tasks_or_issues(self, cr, uid, ids, use_tasks, use_issues, context=None):
values = {}
if use_tasks and not use_issues:
values['alias_model'] = 'project.task'
elif not use_tasks and use_issues:
            values['alias_model'] = 'project.issue'
return {'value': values}
def create(self, cr, uid, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
self._check_create_write_values(cr, uid, vals, context=context)
return super(project_project, self).write(cr, uid, ids, vals, context=context)
class res_partner(osv.osv):
def _issue_count(self, cr, uid, ids, field_name, arg, context=None):
Issue = self.pool['project.issue']
return {
partner_id: Issue.search_count(cr,uid, [('partner_id', '=', partner_id)])
for partner_id in ids
}
""" Inherits partner and adds Issue information in the partner form """
_inherit = 'res.partner'
_columns = {
'issue_count': fields.function(_issue_count, string='# Issues', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
dscdac/Proyecto-IV-modulo2
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py
|
1093
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
antiface/audiolazy
|
refs/heads/master
|
examples/dft_pitch.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2014 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Created on Wed May 01 2013
# danilo [dot] bellini [at] gmail [dot] com
"""
Pitch follower via DFT peak with Tkinter GUI
"""
# ------------------------
# AudioLazy pitch follower
# ------------------------
import sys
from audiolazy import (tostream, AudioIO, freq2str, sHz, chunks,
lowpass, envelope, pi, thub, Stream, maverage)
from numpy.fft import rfft
def limiter(sig, threshold=.1, size=256, env=envelope.rms, cutoff=pi/2048):
sig = thub(sig, 2)
return sig * Stream( 1. if el <= threshold else threshold / el
for el in maverage(size)(env(sig, cutoff=cutoff)) )
@tostream
def dft_pitch(sig, size=2048, hop=None):
for blk in Stream(sig).blocks(size=size, hop=hop):
dft_data = rfft(blk)
idx, vmax = max(enumerate(dft_data),
key=lambda el: abs(el[1]) / (2 * el[0] / size + 1)
)
yield 2 * pi * idx / size
def pitch_from_mic(upd_time_in_ms):
rate = 44100
s, Hz = sHz(rate)
api = sys.argv[1] if sys.argv[1:] else None # Choose API via command-line
chunks.size = 1 if api == "jack" else 16
with AudioIO(api=api) as recorder:
snd = recorder.record(rate=rate)
sndlow = lowpass(400 * Hz)(limiter(snd, cutoff=20 * Hz))
hop = int(upd_time_in_ms * 1e-3 * s)
for pitch in freq2str(dft_pitch(sndlow, size=2*hop, hop=hop) / Hz):
yield pitch
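# Usage sketch (illustration only): the generator yields one formatted pitch
# string per hop, e.g.
#
#     for pitch in pitch_from_mic(200):
#         print(pitch)
#
# A working audio input device is required; an optional audio API name (e.g.
# "jack") may be passed as the first command-line argument, as used below.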
# ----------------
# GUI with tkinter
# ----------------
if __name__ == "__main__":
try:
import tkinter
except ImportError:
import Tkinter as tkinter
import threading
import re
# Window (Tk init), text label and button
tk = tkinter.Tk()
tk.title(__doc__.strip().splitlines()[0])
lbldata = tkinter.StringVar(tk)
lbltext = tkinter.Label(tk, textvariable=lbldata, font=("Purisa", 72),
width=10)
lbltext.pack(expand=True, fill=tkinter.BOTH)
btnclose = tkinter.Button(tk, text="Close", command=tk.destroy,
default="active")
btnclose.pack(fill=tkinter.X)
# Needed data
regex_note = re.compile(r"^([A-Gb#]*-?[0-9]*)([?+-]?)(.*?%?)$")
upd_time_in_ms = 200
# Update functions for each thread
def upd_value(): # Recording thread
pitches = iter(pitch_from_mic(upd_time_in_ms))
while not tk.should_finish:
tk.value = next(pitches)
def upd_timer(): # GUI mainloop thread
lbldata.set("\n".join(regex_note.findall(tk.value)[0]))
tk.after(upd_time_in_ms, upd_timer)
# Multi-thread management initialization
tk.should_finish = False
tk.value = freq2str(0) # Starting value
lbldata.set(tk.value)
tk.upd_thread = threading.Thread(target=upd_value)
# Go
tk.upd_thread.start()
tk.after_idle(upd_timer)
tk.mainloop()
tk.should_finish = True
tk.upd_thread.join()
|
md2/pyhomelib
|
refs/heads/master
|
pyhomelib/statisticsdialog.py
|
1
|
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et tw=79 sts=4 ai si
from PyQt4 import QtCore, QtGui
from ui.statisticsdialog import Ui_StatisticsDialog
class StatisticsDialog(QtGui.QDialog, Ui_StatisticsDialog):
def __init__(self, db, parent=None):
super(StatisticsDialog, self).__init__(parent)
self.setupUi(self)
numberOfBooks = db.getBookCount()
numberOfAuthors = db.execScalar("SELECT COUNT(*) FROM libauthorname").toInt()[0]
numberOfSequences = db.execScalar("SELECT COUNT(*) FROM libseqname").toInt()[0]
totalSize = db.execScalar("SELECT SUM(filesize) FROM libbook").toLongLong()[0]
self.numberOfBooksLabel.setNum(numberOfBooks)
self.numberOfAuthorsLabel.setNum(numberOfAuthors)
self.numberOfSequencesLabel.setNum(numberOfSequences)
self.totalSizeLabel.setText(QtCore.QLocale.system().toString(totalSize)
+ " " + self.tr("bytes"))
|