repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
dohop/supervisor-logstash-notifier | tests/test_include.py | 1 | 2353 | #
# Copyright 2016 Dohop hf.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the inclusion logic
"""
import os
from logstash_notifier import get_value_from_input
from .compat import TestCase
class TestIncludeParser(TestCase):
"""
Tests the parsing of the include options
"""
def test_key_val_parsing(self):
# Test parsing of keyval strings
self.assertEqual(
get_value_from_input('fruits="pear,kiwi,banana"'),
{'fruits': '"pear,kiwi,banana"'}
)
self.assertEqual(
get_value_from_input('berries='),
{'berries': ''}
)
self.assertEqual(
get_value_from_input('pythagoras=a2+b2=c2'),
{'pythagoras': 'a2+b2=c2'}
)
def test_environ_extraction(self):
# Test inclusion of variables from the environ
os.environ['vegetables'] = '"carrot,peas,green beans"'
os.environ['smellythings'] = ''
self.assertEqual(
get_value_from_input('vegetables'),
{'vegetables': '"carrot,peas,green beans"'}
)
self.assertEqual(
get_value_from_input('smellythings'),
{'smellythings': ''}
)
def test_combination(self):
# Test having both environment vars and arbitrary keyvals
os.environ['bears'] = 'polar,brown,black'
os.environ['notbears'] = 'unicorn,griffin,sphinx,otter'
command_line = ['bears', 'notbears', 'e=mc2', 'v=iR', 'qwertyuiop']
expected = {
'bears': 'polar,brown,black',
'notbears': 'unicorn,griffin,sphinx,otter',
'e': 'mc2',
'v': 'iR',
}
result = {}
for variable in command_line:
result.update(get_value_from_input(variable))
self.assertDictEqual(result, expected)
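# A minimal sketch of what get_value_from_input might look like, inferred
# from the behaviour these tests pin down (illustrative only, not the
# library's actual source): key=value strings split on the first '=', bare
# names are looked up in os.environ, and unknown bare names contribute
# nothing.
#
#   def get_value_from_input(variable):
#       if '=' in variable:
#           key, _, value = variable.partition('=')
#           return {key: value}
#       if variable in os.environ:
#           return {variable: os.environ[variable]}
#       return {}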
| apache-2.0 |
vlas-sokolov/pyspeckit | examples/ammonia_fit_example.py | 6 | 3890 | from __future__ import print_function
import pyspeckit
import numpy as np
# Grab a .fits spectrum with a legitimate header
sp = pyspeckit.Spectrum('G031.947+00.076_nh3_11_Tastar.fits')
""" HEADER:
SIMPLE = T / Written by IDL: Tue Aug 31 18:17:01 2010
BITPIX = -64
NAXIS = 1 / number of array dimensions
NAXIS1 = 8192 /Number of positions along axis 1
CDELT1 = -0.077230503
CRPIX1 = 4096.0000
CRVAL1 = 68.365635
CTYPE1 = 'VRAD'
CUNIT1 = 'km/s '
SPECSYS = 'LSRK'
RESTFRQ = 2.3694500e+10
VELOSYS = -43755.930
CDELT1F = 6103.5156
CRPIX1F = 4096.0000
CRVAL1F = 2.3692555e+10
CTYPE1F = 'FREQ'
CUNIT1F = 'Hz'
SPECSYSF= 'LSRK'
RESTFRQF= 2.3694500e+10
VELOSYSF= -43755.930
VDEF = 'RADI-LSR'
SRCVEL = 70.000000
ZSOURCE = 0.00023349487
BUNIT = 'K '
OBJECT = 'G031.947+00.076'
TELESCOP= 'GBT'
TSYS = 42.1655
ELEV = 34.904846
AIRMASS = 1.7475941
LINE = 'nh3_11'
FREQ = 23.692555
TARGLON = 31.947236
TARGLAT = 0.076291610
MJD-AVG = 54548.620
CONTINUU= 0.0477613
CONTERR = 0.226990
SMTHOFF = 0
COMMENT 1 blank line
END
"""
# Start by computing the error using a reasonably signal-free region
sp.error[:] = sp.stats((-100, 50))['std']
# Change the plot range to be a reasonable physical coverage (the default is to
# plot the whole 8192 channel spectrum)
sp.plotter(xmin=-100,xmax=300)
# There are many extra channels, so let's smooth. Default is a Gaussian
# smooth. Downsampling helps speed up the fitting (assuming the line is still
# Nyquist sampled, which it is)
sp.smooth(2)
# replot after smoothing
sp.plotter(xmin=-100,xmax=300)
# First, fit a gaussian to the whole spectrum as a "first guess" (good at
# picking up the centroid, bad at getting the width right)
# negamp=False forces the fitter to search for a positive peak, not the
# negatives created in this spectrum by frequency switching
sp.specfit.selectregion(xmin=60,xmax=120,xtype='wcs')
sp.specfit(negamp=False, guesses='moments')
# sp.specfit(negamp=False, guesses=[5.9,4.45,8.3e14,0.84,96.2,0.43])
# Save the fit...
sp.plotter.figure.savefig('nh3_gaussfit.png')
# and print some information to screen
print("Guesses: ", sp.specfit.guesses)
print("Best fit: ", sp.specfit.modelpars)
# Run the ammonia spec fitter with a reasonable guess
# Parameters are Trot, Tex, column, width, centroid, ortho fraction
# Since we only have a single line (1-1), the kinetic temperature is
# unconstrained: we'll fix it at 7 K. Similarly, the ortho fraction
# is fixed to 0.5
T=True; F=False
sp.specfit(fittype='ammonia',
guesses=[7,4.45,np.log10(8.3e14),0.84,96.2,0.5],
fixed=[T,F,F,F,F,T],
quiet=False)
# sp.specfit.peakbgfit(fittype='ammonia', guesses=[5.9,4.45,8.3e14,0.84,96.2,0.43],quiet=False)
# plot up the residuals in a different window. The residuals strongly suggest
# the presence of a second velocity component.
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_fit.png')
print("Guesses: ", sp.specfit.guesses)
print("Best fit: ", sp.specfit.modelpars)
# re-plot zoomed in
sp.plotter(xmin=70,xmax=125)
# replot the fit
sp.specfit.plot_fit()
sp.plotter.figure.savefig('nh3_ammonia_fit_zoom.png')
# refit with two components
sp.specfit(fittype='ammonia',
guesses=[7,3.5,np.log10(5e14),0.68,97.3,0.5]+[7,4.2,np.log10(7e14),0.52,95.8,0.5],
fixed=[T,F,F,F,F,T]*2,
quiet=False)
sp.specfit.plotresiduals()
sp.plotter.figure.savefig('nh3_ammonia_multifit_zoom.png')
sp.specfit.plot_fit(show_hyperfine_components=True)
assert len(sp.specfit._plotted_components) == 202
x1,y1 = (sp.specfit._plotted_components)[0].get_data()
x2,y2 = (sp.specfit._plotted_components)[1].get_data()
# make sure they are not all identical (regression test for when the cumulative
# spectrum was being added N times)
assert not np.all(y1==y2)
| mit |
KasenJ/CommunityPython | code/push/google/protobuf/internal/message_test.py | 224 | 22295 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import copy
import math
import operator
import pickle
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import test_util
from google.protobuf import message
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
def IsPosInf(val):
return isinf(val) and (val > 0)
def IsNegInf(val):
return isinf(val) and (val < 0)
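# Illustrative identities for the helpers above (exposition only):
#   isnan(float('nan')) -> True, since NaN is the only float unequal to itself
#   isinf(0.0) -> False, since 0.0 * 0 == 0.0 is not NaN
#   IsPosInf(float('inf')) -> True; IsNegInf(float('-inf')) -> True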
class MessageTest(unittest.TestCase):
def testGoldenMessage(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
test_util.ExpectAllFieldsSet(self, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenExtensions(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, golden_message.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedMessage(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testGoldenPackedExtensions(self):
golden_data = test_util.GoldenFile('golden_packed_fields_message').read()
golden_message = unittest_pb2.TestPackedExtensions()
golden_message.ParseFromString(golden_data)
all_set = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(all_set)
self.assertEquals(all_set, golden_message)
self.assertEqual(golden_data, all_set.SerializeToString())
golden_copy = copy.deepcopy(golden_message)
self.assertEqual(golden_data, golden_copy.SerializeToString())
def testPickleSupport(self):
golden_data = test_util.GoldenFile('golden_message').read()
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
def testPickleIncompleteProto(self):
golden_message = unittest_pb2.TestRequired(a=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEquals(unpickled_message, golden_message)
self.assertEquals(unpickled_message.a, 1)
# This is still an incomplete proto - so serializing should fail
self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)
def testPositiveInfinity(self):
golden_data = ('\x5D\x00\x00\x80\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
'\xCD\x02\x00\x00\x80\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
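# (For reference, assuming protobuf's unittest.proto field numbering: each
# tag decodes as (field_number << 3) | wire_type, so \x5D is field 11/32-bit
# (optional_float), \x61 is field 12/64-bit (optional_double), and the varint
# tags \xCD\x02 and \xD1\x02 are fields 41 (repeated_float) and 42
# (repeated_double). \x00\x00\x80\x7F is +inf in little-endian IEEE-754
# single precision.)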
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.optional_float))
self.assertTrue(IsPosInf(golden_message.optional_double))
self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self):
golden_data = ('\x5D\x00\x00\x80\xFF'
'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
'\xCD\x02\x00\x00\x80\xFF'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.optional_float))
self.assertTrue(IsNegInf(golden_message.optional_double))
self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self):
golden_data = ('\x5D\x00\x00\xC0\x7F'
'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
'\xCD\x02\x00\x00\xC0\x7F'
'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.optional_float))
self.assertTrue(isnan(golden_message.optional_double))
self.assertTrue(isnan(golden_message.repeated_float[0]))
self.assertTrue(isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.optional_float))
self.assertTrue(isnan(message.optional_double))
self.assertTrue(isnan(message.repeated_float[0]))
self.assertTrue(isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsPosInf(golden_message.packed_float[0]))
self.assertTrue(IsPosInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\x80\xFF'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(IsNegInf(golden_message.packed_float[0]))
self.assertTrue(IsNegInf(golden_message.packed_double[0]))
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self):
golden_data = ('\xA2\x06\x04\x00\x00\xC0\x7F'
'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = unittest_pb2.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(isnan(golden_message.packed_float[0]))
self.assertTrue(isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = unittest_pb2.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(isnan(message.packed_float[0]))
self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
def testExtremeDoubleValues(self):
message = unittest_pb2.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testSortingRepeatedScalarFieldsDefaultComparator(self):
"""Check some different types with the default comparator."""
message = unittest_pb2.TestAllTypes()
# TODO(mattp): would testing more scalar types strengthen test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
message.repeated_bytes.append('a')
message.repeated_bytes.append('c')
message.repeated_bytes.append('b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], 'a')
self.assertEqual(message.repeated_bytes[1], 'b')
self.assertEqual(message.repeated_bytes[2], 'c')
def testSortingRepeatedScalarFieldsCustomComparator(self):
"""Check some different types with custom comparator."""
message = unittest_pb2.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(lambda x,y: cmp(abs(x), abs(y)))
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(lambda x,y: cmp(len(x), len(y)))
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self):
"""Check passing a custom comparator to sort a repeated composite field."""
message = unittest_pb2.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(lambda x,y: cmp(x.bb, y.bb))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
def testRepeatedCompositeFieldSortArguments(self):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
get_bb = operator.attrgetter('bb')
cmp_bb = lambda a, b: cmp(a.bb, b.bb)
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
message.repeated_nested_message.sort(sort_function=cmp_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self):
"""Check sorting a scalar field using list.sort() arguments."""
message = unittest_pb2.TestAllTypes()
abs_cmp = lambda a, b: cmp(abs(a), abs(b))
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_int32.sort(sort_function=abs_cmp)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
len_cmp = lambda a, b: cmp(len(a), len(b))
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
message.repeated_string.sort(sort_function=len_cmp)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(cmp=len_cmp, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
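# (For reference: the cmp= and sort_function= comparator arguments exercised
# above are Python 2 idioms; under Python 3 only key-based sorting remains,
# e.g. message.repeated_int32.sort(key=abs), or
# key=functools.cmp_to_key(abs_cmp) to adapt a comparator.)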
def testParsingMerge(self):
"""Check the merge behavior when a required or optional field appears
multiple times in the input."""
messages = [
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes(),
unittest_pb2.TestAllTypes() ]
messages[0].optional_int32 = 1
messages[1].optional_int64 = 2
messages[2].optional_int32 = 3
messages[2].optional_string = 'hello'
merged_message = unittest_pb2.TestAllTypes()
merged_message.optional_int32 = 3
merged_message.optional_int64 = 2
merged_message.optional_string = 'hello'
generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
generator.field1.extend(messages)
generator.field2.extend(messages)
generator.field3.extend(messages)
generator.ext1.extend(messages)
generator.ext2.extend(messages)
generator.group1.add().field1.MergeFrom(messages[0])
generator.group1.add().field1.MergeFrom(messages[1])
generator.group1.add().field1.MergeFrom(messages[2])
generator.group2.add().field1.MergeFrom(messages[0])
generator.group2.add().field1.MergeFrom(messages[1])
generator.group2.add().field1.MergeFrom(messages[2])
data = generator.SerializeToString()
parsing_merge = unittest_pb2.TestParsingMerge()
parsing_merge.ParseFromString(data)
# Required and optional fields should be merged.
self.assertEqual(parsing_merge.required_all_types, merged_message)
self.assertEqual(parsing_merge.optional_all_types, merged_message)
self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
merged_message)
self.assertEqual(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.optional_ext],
merged_message)
# Repeated fields should not be merged.
self.assertEqual(len(parsing_merge.repeated_all_types), 3)
self.assertEqual(len(parsing_merge.repeatedgroup), 3)
self.assertEqual(len(parsing_merge.Extensions[
unittest_pb2.TestParsingMerge.repeated_ext]), 3)
def testSortEmptyRepeatedCompositeContainer(self):
"""Exercise a scenario that has led to segfaults in the past.
"""
m = unittest_pb2.TestAllTypes()
m.repeated_nested_message.sort()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
kobejean/tensorflow | tensorflow/python/ops/boosted_trees_ops.py | 9 | 8018 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for boosted_trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_boosted_trees_ops
from tensorflow.python.ops import resources
# Re-exporting ops used by other modules.
# pylint: disable=unused-import
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_bucketize
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_calculate_best_gains_per_feature as calculate_best_gains_per_feature
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_center_bias as center_bias
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_create_quantile_stream_resource as create_quantile_stream_resource
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_example_debug_outputs as example_debug_outputs
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_quantile_summaries as make_quantile_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_make_stats_summary as make_stats_summary
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_predict as predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_add_summaries as quantile_add_summaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_flush as quantile_flush
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_quantile_stream_resource_get_bucket_boundaries as get_bucket_boundaries
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_training_predict as training_predict
from tensorflow.python.ops.gen_boosted_trees_ops import boosted_trees_update_ensemble as update_ensemble
# pylint: enable=unused-import
from tensorflow.python.training import saver
class PruningMode(object):
"""Class for working with Pruning modes."""
NO_PRUNING, PRE_PRUNING, POST_PRUNING = range(0, 3)
_map = {'none': NO_PRUNING, 'pre': PRE_PRUNING, 'post': POST_PRUNING}
@classmethod
def from_str(cls, mode):
if mode in cls._map:
return cls._map[mode]
else:
raise ValueError('pruning_mode mode must be one of: {}'.format(', '.join(
sorted(cls._map))))
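# Illustrative usage: PruningMode.from_str('post') == PruningMode.POST_PRUNING,
# while an unrecognized string such as 'full' raises
# ValueError("pruning_mode mode must be one of: none, post, pre").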
class _TreeEnsembleSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeEnsemble."""
def __init__(self, resource_handle, create_op, name):
"""Creates a _TreeEnsembleSavable object.
Args:
resource_handle: handle to the decision tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under.
"""
stamp_token, serialized = (
gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle))
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful for the tree ensemble variable, so we just pass an
# empty value.
slice_spec = ''
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + '_stamp'),
saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec,
name + '_serialized'),
]
super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name)
self._resource_handle = resource_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self._resource_handle,
stamp_token=restored_tensors[0],
tree_ensemble_serialized=restored_tensors[1])
class TreeEnsemble(object):
"""Creates TreeEnsemble resource."""
def __init__(self, name, stamp_token=0, is_local=False, serialized_proto=''):
with ops.name_scope(name, 'TreeEnsemble') as name:
self._resource_handle = (
gen_boosted_trees_ops.boosted_trees_ensemble_resource_handle_op(
container='', shared_name=name, name=name))
create_op = gen_boosted_trees_ops.boosted_trees_create_ensemble(
self.resource_handle,
stamp_token,
tree_ensemble_serialized=serialized_proto)
is_initialized_op = (
gen_boosted_trees_ops.is_boosted_trees_ensemble_initialized(
self._resource_handle))
# Adds the variable to the savable list.
if not is_local:
saveable = _TreeEnsembleSavable(self.resource_handle, create_op,
self.resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(
self.resource_handle,
create_op,
is_initialized_op,
is_shared=not is_local)
@property
def resource_handle(self):
return self._resource_handle
def get_stamp_token(self):
"""Returns the current stamp token of the resource."""
stamp_token, _, _, _, _ = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
return stamp_token
def get_states(self):
"""Returns states of the tree ensemble.
Returns:
stamp_token, num_trees, num_finalized_trees, num_attempted_layers and
range of the nodes in the latest layer.
"""
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = (
gen_boosted_trees_ops.boosted_trees_get_ensemble_states(
self.resource_handle))
# Use identity to give names.
return (array_ops.identity(stamp_token, name='stamp_token'),
array_ops.identity(num_trees, name='num_trees'),
array_ops.identity(num_finalized_trees, name='num_finalized_trees'),
array_ops.identity(
num_attempted_layers, name='num_attempted_layers'),
array_ops.identity(nodes_range, name='last_layer_nodes_range'))
def serialize(self):
"""Serializes the ensemble into proto and returns the serialized proto.
Returns:
stamp_token: int64 scalar Tensor to denote the stamp of the resource.
serialized_proto: string scalar Tensor of the serialized proto.
"""
return gen_boosted_trees_ops.boosted_trees_serialize_ensemble(
self.resource_handle)
def deserialize(self, stamp_token, serialized_proto):
"""Deserialize the input proto and resets the ensemble from it.
Args:
stamp_token: int64 scalar Tensor to denote the stamp of the resource.
serialized_proto: string scalar Tensor of the serialized proto.
Returns:
Operation (for dependencies).
"""
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(
self.resource_handle, stamp_token, serialized_proto)
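# Illustrative usage under TF1-style graph execution (a sketch; the session
# setup shown here is an assumption, not part of this module):
#   ensemble = TreeEnsemble('ensemble')
#   with tf.Session() as sess:
#       sess.run(resources.initialize_resources(resources.shared_resources()))
#       stamp = sess.run(ensemble.get_stamp_token())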
| apache-2.0 |
Just-D/chromium-1 | tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/pass_through.py | 31 | 1161 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class PassThroughPage(page_module.Page):
"""
A test page for the chrome proxy pass-through tests.
"""
def __init__(self, url, page_set):
super(PassThroughPage, self).__init__(url=url, page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(PassThroughPage, self).RunNavigateSteps(action_runner)
action_runner.ExecuteJavaScript('''
(function() {
var request = new XMLHttpRequest();
request.open("GET", "%s");
request.setRequestHeader("Chrome-Proxy", "pass-through");
request.send(null);
})();''' % (self.url))
action_runner.Wait(1)
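# (The "Chrome-Proxy: pass-through" request header asks the data reduction
# proxy to forward the response unmodified, without compression or
# transcoding, which is the behaviour these tests exercise.)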
class PassThroughStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(PassThroughStorySet, self).__init__()
urls_list = [
'http://check.googlezip.net/image.png',
]
for url in urls_list:
self.AddStory(PassThroughPage(url, self))
| bsd-3-clause |
chenruixuan/PopClip-Extensions | source/Trello/requests/status_codes.py | 926 | 3200 | # -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
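# Illustrative usage: after the loop above, every alias resolves to its
# numeric code via attribute access, e.g. codes.ok == codes.OK == 200 and
# codes.not_found == 404. Titles starting with a backslash (such as '\\o/')
# only receive the lowercase attribute.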
| mit |
oklink-dev/bitcoin | qa/rpc-tests/test_framework/comptool.py | 56 | 18090 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a provided test script.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
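# Illustrative sketch of a test generator of the expected shape (the helper
# functions here are hypothetical, not shipped with this module):
#
#   class ExampleTestGen(object):
#       def get_tests(self):
#           block = make_valid_block()                    # hypothetical
#           yield TestInstance([[block, True]])           # expect acceptance
#           bad_tx = make_invalid_tx()                    # hypothetical
#           yield TestInstance([[bad_tx, RejectResult(16)]], sync_every_tx=True)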
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
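# Illustrative: RejectResult(16, b'bad-txns') matches any reject with code 16
# whose reason starts with b'bad-txns'; constructed with no reason it matches
# any code-16 reject and its repr renders as '16:*'.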
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ])
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| mit |
jhjguxin/blogserver | lib/python2.7/site-packages/django/core/mail/__init__.py | 229 | 5072 | """
Tools for sending email.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
# Imported for backwards compatibility, and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.utils import CachedDnsName, DNS_NAME
from django.core.mail.message import \
EmailMessage, EmailMultiAlternatives, \
SafeMIMEText, SafeMIMEMultipart, \
DEFAULT_ATTACHMENT_MIME_TYPE, make_msgid, \
BadHeaderError, forbid_multi_line_headers
from django.core.mail.backends.smtp import EmailBackend as _SMTPConnection
def get_connection(backend=None, fail_silently=False, **kwds):
"""Load an e-mail backend and return an instance of it.
If backend is None (default) settings.EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
path = backend or settings.EMAIL_BACKEND
try:
mod_name, klass_name = path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError, e:
raise ImproperlyConfigured(('Error importing email backend module %s: "%s"'
% (mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
return klass(fail_silently=fail_silently, **kwds)
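# Illustrative usage (assuming the console backend that ships with Django):
#   connection = get_connection('django.core.mail.backends.console.EmailBackend')
#   send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'],
#             connection=connection)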
def send_mail(subject, message, from_email, recipient_list,
fail_silently=False, auth_user=None, auth_password=None,
connection=None):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
return EmailMessage(subject, message, from_email, recipient_list,
connection=connection).send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
"""
Given a datatuple of (subject, message, from_email, recipient_list), sends
each message to each recipient list. Returns the number of e-mails sent.
If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
If auth_user and auth_password are set, they're used to log in.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
messages = [EmailMessage(subject, message, sender, recipient)
for subject, message, sender, recipient in datatuple]
return connection.send_messages(messages)
def mail_admins(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the admins, as defined by the ADMINS setting."""
if not settings.ADMINS:
return
mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the managers, as defined by the MANAGERS setting."""
if not settings.MANAGERS:
return
mail = EmailMultiAlternatives(u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
class SMTPConnection(_SMTPConnection):
def __init__(self, *args, **kwds):
import warnings
warnings.warn(
'mail.SMTPConnection is deprecated; use mail.get_connection() instead.',
DeprecationWarning
)
super(SMTPConnection, self).__init__(*args, **kwds)
| mit |
ludwiktrammer/odoo | openerp/cli/server.py | 14 | 5063 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import atexit
import csv
import logging
import os
import signal
import sys
import threading
import traceback
import time
import openerp
from . import Command
__author__ = openerp.release.author
__version__ = openerp.release.version
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
"""Warn if the process's user is 'root' (on POSIX system)."""
if os.name == 'posix':
import pwd
if pwd.getpwuid(os.getuid())[0] == 'root':
sys.stderr.write("Running as user 'root' is a security risk.\n")
def check_postgres_user():
""" Exit if the configured database user is 'postgres'.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if config['db_user'] == 'postgres':
sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
sys.exit(1)
def report_configuration():
""" Log the server version and some configuration values.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
_logger.info("OpenERP version %s", __version__)
_logger.info('addons paths: %s', openerp.modules.module.ad_paths)
host = config['db_host'] or os.environ.get('PGHOST', 'default')
port = config['db_port'] or os.environ.get('PGPORT', 'default')
user = config['db_user'] or os.environ.get('PGUSER', 'default')
_logger.info('database: %s@%s:%s', user, host, port)
def rm_pid_file(main_pid):
config = openerp.tools.config
if config['pidfile'] and main_pid == os.getpid():
try:
os.unlink(config['pidfile'])
except OSError:
pass
def setup_pid_file():
""" Create a file with the process id written in it.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
pid = os.getpid()
with open(config['pidfile'], 'w') as fd:
fd.write(str(pid))
atexit.register(rm_pid_file, pid)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
with open(config["translate_out"], "w") as buf:
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_export(config["language"],
config["translate_modules"] or ["all"], buf, fileformat, cr)
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
context = {'overwrite': config["overwrite_existing_translations"]}
dbname = config['db_name']
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_load(
cr, config["translate_in"], config["language"], context=context,
)
def main(args):
check_root_user()
openerp.tools.config.parse_config(args)
check_postgres_user()
report_configuration()
config = openerp.tools.config
# The default limit for CSV fields in the csv module is 128KiB, which is
# not quite sufficient to import images stored in attachments. 500MiB is
# a bit overkill, but better safe than sorry.
csv.field_size_limit(500 * 1024 * 1024)
if config["db_name"]:
try:
openerp.service.db._create_empty_database(config["db_name"])
except openerp.service.db.DatabaseExists:
pass
if config["test_file"]:
config["test_enable"] = True
if config["translate_out"]:
export_translation()
sys.exit(0)
if config["translate_in"]:
import_translation()
sys.exit(0)
# This needs to be done now to ensure the use of the multiprocessing
# signaling mechanism for registries loaded with -d
if config['workers']:
openerp.multi_process = True
preload = []
if config['db_name']:
preload = config['db_name'].split(',')
stop = config["stop_after_init"]
setup_pid_file()
rc = openerp.service.server.start(preload=preload, stop=stop)
sys.exit(rc)
class Server(Command):
"""Start the odoo server (default command)"""
def run(self, args):
main(args)
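# Hedged usage sketch: invoking the server command programmatically. The
# flags are illustrative (-d preloads a database, --workers enables the
# multiprocessing server); see openerp.tools.config for the full set.
def _example_run_server():
    Server().run(['-d', 'mydb', '--workers=2'])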
| agpl-3.0 |
egtaonline/GameAnalysis | gameanalysis/reduction/twins.py | 1 | 2919 | """Twins reduction
This is the same as a deviation preserving reduction with the number of
players reduced to two for every role."""
import numpy as np
from gameanalysis import rsgame
from gameanalysis import utils
from gameanalysis.reduction import deviation_preserving as dpr
def reduce_game(full_game, red_players=None):
"""Reduce a game using twins reduction
Parameters
----------
full_game : Game
The game to reduce.
red_players : ndarray-like, optional
The reduced number of players for each role. This must be None or
the expected number of players for the twins reduction.
"""
exp_red_players = np.minimum(full_game.num_role_players, 2)
utils.check(
red_players is None or np.all(exp_red_players == red_players),
"twins reduction didn't get expected reduced players")
return dpr.reduce_game(full_game, exp_red_players)
def expand_profiles(full_game, profiles):
"""Expand profiles using twins reduction
Parameters
----------
full_game : Game
Game that expanded profiles will be valid for.
profiles : ndarray-like
The profiles to expand
"""
red_players = np.minimum(full_game.num_role_players, 2)
profiles = np.asarray(profiles, int)
red_game = rsgame.empty(red_players, full_game.num_role_strats)
utils.check(
red_game.is_profile(profiles).all(), 'profiles must be valid')
return dpr.expand_profiles(full_game, profiles)
def reduce_profiles(red_game, profiles):
"""Reduce profiles using twins
Parameters
----------
red_game : Game
Game that reduced profiles will be profiles for. This game must
have at most two players per role, as the twins reduction requires.
profiles : ndarray-like
The profiles to reduce.
"""
profiles = np.asarray(profiles, int)
utils.check(
np.all(red_game.num_role_players <= 2),
'red game must be a twins game')
return dpr.reduce_profiles(red_game, profiles)
def expand_deviation_profiles(full_game, rest, red_players=None,
role_index=None):
"""Expand all deviation profiles from a restriction
Parameters
----------
full_game : Game
The game the deviations profiles will be valid for.
rest : [bool]
The restriction to get deviations from.
red_players : [int], optional
The number of players in each role in the reduced game. If
specified, it must match the number expected by the twins reduction.
role_index : int, optional
If specified, only expand deviations for the selected role.
"""
exp_red_players = np.minimum(full_game.num_role_players, 2)
utils.check(
red_players is None or np.all(exp_red_players == red_players),
"twins reduction didn't get expected reduced players")
return dpr.expand_deviation_profiles(
full_game, rest, exp_red_players, role_index)
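# Hedged usage sketch: reducing an empty 3-player, 4-strategy symmetric
# game to its twins (2-player) counterpart. The sizes are placeholders and
# this assumes a payoff-free game is acceptable input to reduce_game.
def _example_twins_reduction():
    full = rsgame.empty([3], [4])
    red = reduce_game(full)
    return red.num_role_players  # expected: array([2])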
| apache-2.0 |
doctoromer/haya-data | server/reconstruct.py | 1 | 5009 | """The reconstruct thread module."""
import threading
import logging
import os
import protocol.thread
from utils import handle_except
class ReconstructThread(threading.Thread):
"""
The reconstruction thread handles the reconstruction of the system.
Attributes:
virtual_file (str): The name of the file in the virtual storage.
block_size (int): The size of the blocks.
duplication_level (int): How many times each block is duplicated.
validation_level (int): The number of data blocks that each validation block validates.
clients (list of str): List of the clients.
key (str): The encryption key.
logger (logging.Logger): The logger of the thread.
logic_queue (Queue.Queue): The queue of the logic thread.
reconstruct_queue (Queue.Queue): The queue of the thread.
callback (func, optional): A callback to be called
after the thread has finished.
temp (str): Temporary directory to store the restored files.
"""
def __init__(self,
virtual_file,
block_size,
duplication_level,
validation_level,
clients,
key,
reconstruct_queue,
logic_queue,
callback=lambda: None,
temp='temp'):
"""
Initialize the system reconstruction thread.
Args:
virtual_file (str): The name of the file in the virtual storage.
block_size (int): The size of the blocks.
duplication_level (int): How many times each block is duplicated.
validation_level (int): The number of data blocks that each validation block validates.
clients (list of str): List of the clients.
key (str): The encryption key.
reconstruct_queue (Queue.Queue): The queue of the thread.
logic_queue (Queue.Queue): The queue of the logic thread.
callback (func, optional): A callback to be called
after the thread has finished.
temp (str): Temporary directory to store the restored files.
"""
current_class = self.__class__
thread_name = current_class.__name__
super(current_class, self).__init__(name=thread_name)
self.logger = logging.getLogger('reconstruct')
self.virtual_file = virtual_file
self.block_size = block_size
self.duplication_level = duplication_level
self.validation_level = validation_level
self.clients = clients
self.key = key
self.reconstruct_queue = reconstruct_queue
self.logic_queue = logic_queue
self.callback = callback
self.temp = temp
@handle_except('reconstruct')
def run(self):
"""Execute the system reconstruction thread."""
reconstruct_dir = os.path.join(os.getcwdu(), self.temp, 'reconstruct')
self.logger.debug(
'reconstruction temporary directory: %s' % reconstruct_dir)
# if the temporary directory doesn't exist, create it
if not os.path.exists(self.temp):
os.mkdir(self.temp)
# if the reconstruction directory doesn't exist, create it
if not os.path.exists(reconstruct_dir):
os.mkdir(reconstruct_dir)
file_path = os.path.join(reconstruct_dir, self.virtual_file)
# event that will be used to block until the file is restored
restored_event = threading.Event()
# restore the file
message = protocol.thread.restore(
real_file=file_path,
virtual_file=self.virtual_file,
callback=restored_event.set)
self.logic_queue.put(message)
self.logger.info('start restoring the file')
# block until the file is restored
restored_event.wait()
self.logger.info('the file is restored')
# delete the file from storage
message = protocol.thread.delete(virtual_file=self.virtual_file)
self.logic_queue.put(message)
self.logger.info('the file is deleted from storage')
# event that will be used to block until the file is distributed
distributed_event = threading.Event()
# redistribute the file
distribute = protocol.thread.distribute(
file_path=file_path,
block_size=self.block_size,
duplication=self.duplication_level,
validation=self.validation_level,
callback=distributed_event.set)
self.logic_queue.put(distribute)
self.logger.info('start redistributing the file')
# block until the file is distributed
distributed_event.wait()
self.logger.info('file redistributed')
# remove the temporary file
os.remove(file_path)
self.callback()
message = protocol.thread.thread_exit(
thread_id=self.ident, success=True)
self.logic_queue.put(message)
self.logger.info(self.name + ' thread end')
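# Hedged usage sketch: wiring the thread to the logic queue. The queues,
# client list, key and numeric parameters are all placeholders.
def _example_start_reconstruct(logic_queue, reconstruct_queue):
    thread = ReconstructThread(
        virtual_file='report.pdf',
        block_size=1024 * 1024,
        duplication_level=2,
        validation_level=4,
        clients=['10.0.0.1', '10.0.0.2'],
        key='secret-key',
        reconstruct_queue=reconstruct_queue,
        logic_queue=logic_queue)
    thread.start()
    return thread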
| mit |
sclabs/sccms-nonrel | django/contrib/gis/db/backends/spatialite/base.py | 244 | 4347 | from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import *
from django.db.backends.sqlite3.base import DatabaseWrapper as SqliteDatabaseWrapper, \
_sqlite_extract, _sqlite_date_trunc, _sqlite_regexp
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
class DatabaseWrapper(SqliteDatabaseWrapper):
def __init__(self, *args, **kwargs):
# Before we get too far, make sure pysqlite 2.5+ is installed.
if Database.version_info < (2, 5, 0):
raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
'compatible with SpatiaLite and GeoDjango.')
# Trying to find the location of the SpatiaLite library.
# Here we are figuring out the path to the SpatiaLite library
# (`libspatialite`). If it's not in the system library path (e.g., it
# cannot be found by `ctypes.util.find_library`), then it may be set
# manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
find_library('spatialite'))
if not self.spatialite_lib:
raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
'Make sure it is in your library path, or set '
'SPATIALITE_LIBRARY_PATH in your settings.'
)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.ops = SpatiaLiteOperations(self)
self.client = SpatiaLiteClient(self)
self.creation = SpatiaLiteCreation(self)
self.introspection = SpatiaLiteIntrospection(self)
def _cursor(self):
if self.connection is None:
## The following is the same as in django.db.backends.sqlite3.base ##
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
# Register extract, date_trunc, and regexp functions.
self.connection.create_function("django_extract", 2, _sqlite_extract)
self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
self.connection.create_function("regexp", 2, _sqlite_regexp)
connection_created.send(sender=self.__class__, connection=self)
## From here on, customized for GeoDjango ##
# Enabling extension loading on the SQLite connection.
try:
self.connection.enable_load_extension(True)
except AttributeError:
raise ImproperlyConfigured('The pysqlite library does not support C extension loading. '
'Both SQLite and pysqlite must be configured to allow '
'the loading of extensions to use SpatiaLite.'
)
# Loading the SpatiaLite library extension on the connection, and returning
# the created cursor.
cur = self.connection.cursor(factory=SQLiteCursorWrapper)
try:
cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
except Exception as msg:
raise ImproperlyConfigured('Unable to load the SpatiaLite library extension '
'"%s" because: %s' % (self.spatialite_lib, msg))
return cur
else:
return self.connection.cursor(factory=SQLiteCursorWrapper)
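# Hedged configuration sketch (belongs in the project's settings module,
# not here): when libspatialite is not on the system library path, point
# GeoDjango at it explicitly. The path below is a placeholder.
#
#     SPATIALITE_LIBRARY_PATH = '/usr/local/lib/libspatialite.so'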
| bsd-3-clause |
ClearCorp-dev/odoo | addons/l10n_ae/__init__.py | 669 | 1059 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jbedorf/tensorflow | tensorflow/python/autograph/pyct/qual_names.py | 24 | 8125 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for manipulating qualified names.
A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite
(e.g. 'foo.bar') syntactic symbols.
This is *not* related to the __qualname__ attribute used by inspect, which
refers to scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
class Symbol(collections.namedtuple('Symbol', ['name'])):
"""Represents a Python symbol."""
class StringLiteral(collections.namedtuple('StringLiteral', ['value'])):
"""Represents a Python string literal."""
def __str__(self):
return '\'%s\'' % self.value
def __repr__(self):
return str(self)
class NumberLiteral(collections.namedtuple('NumberLiteral', ['value'])):
"""Represents a Python numeric literal."""
def __str__(self):
return '%s' % self.value
def __repr__(self):
return str(self)
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
class QN(object):
"""Represents a qualified name."""
def __init__(self, base, attr=None, subscript=None):
if attr is not None and subscript is not None:
raise ValueError('A QN can only be either an attr or a subscript, not '
'both: attr={}, subscript={}.'.format(attr, subscript))
self._has_attr = False
self._has_subscript = False
if attr is not None:
if not isinstance(base, QN):
raise ValueError(
'for attribute QNs, base must be a QN; got instead "%s"' % base)
if not isinstance(attr, str):
raise ValueError('attr may only be a string; got instead "%s"' % attr)
self._parent = base
# TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
self.qn = (base, attr)
self._has_attr = True
elif subscript is not None:
if not isinstance(base, QN):
raise ValueError('For subscript QNs, base must be a QN.')
self._parent = base
self.qn = (base, subscript)
self._has_subscript = True
else:
if not isinstance(base, (str, StringLiteral, NumberLiteral)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
'for simple QNs, base must be a string or a Literal object;'
' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
def is_symbol(self):
return isinstance(self.qn[0], str)
def is_simple(self):
return len(self.qn) <= 1
def is_composite(self):
return len(self.qn) > 1
def has_subscript(self):
return self._has_subscript
def has_attr(self):
return self._has_attr
@property
def parent(self):
if self._parent is None:
raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
return self._parent
@property
def owner_set(self):
"""Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
"""
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
@property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
def __hash__(self):
return hash(self.qn + (self._has_attr, self._has_subscript))
def __eq__(self, other):
return (isinstance(other, QN) and self.qn == other.qn and
self.has_subscript() == other.has_subscript() and
self.has_attr() == other.has_attr())
def __str__(self):
if self.has_subscript():
return str(self.qn[0]) + '[' + str(self.qn[1]) + ']'
if self.has_attr():
return '.'.join(map(str, self.qn))
else:
return str(self.qn[0])
def __repr__(self):
return str(self)
def ssf(self):
"""Simple symbol form."""
ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
ssf_string = ''
for i in range(0, len(self.qn) - 1):
if self.has_subscript():
delimiter = '_sub_'
else:
delimiter = '_'
ssf_string += ssfs[i] + delimiter
return ssf_string + ssfs[-1]
def ast(self):
# The caller must adjust the context appropriately.
if self.has_subscript():
return gast.Subscript(self.parent.ast(), gast.Index(self.qn[-1].ast()),
None)
if self.has_attr():
return gast.Attribute(self.parent.ast(), self.qn[-1], None)
base = self.qn[0]
if isinstance(base, str):
return gast.Name(base, None, None)
elif isinstance(base, StringLiteral):
return gast.Str(base.value)
elif isinstance(base, NumberLiteral):
return gast.Num(base.value)
else:
assert False, ('the constructor should prevent types other than '
'str, StringLiteral and NumberLiteral')
class QnResolver(gast.NodeTransformer):
"""Annotates nodes with QN information.
Note: Not using NodeAnnos to avoid circular dependencies.
"""
def visit_Name(self, node):
node = self.generic_visit(node)
anno.setanno(node, anno.Basic.QN, QN(node.id))
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr))
return node
def visit_Subscript(self, node):
# TODO(mdan): This may no longer apply if we overload getitem.
node = self.generic_visit(node)
s = node.slice
if not isinstance(s, gast.Index):
# TODO(mdan): Support range and multi-dimensional indices.
# Continuing silently because some demos use these.
return node
if isinstance(s.value, gast.Num):
subscript = QN(NumberLiteral(s.value.n))
elif isinstance(s.value, gast.Str):
subscript = QN(StringLiteral(s.value.s))
else:
# The index may be an expression, in which case a name doesn't make sense.
if anno.hasanno(node.slice.value, anno.Basic.QN):
subscript = anno.getanno(node.slice.value, anno.Basic.QN)
else:
return node
if anno.hasanno(node.value, anno.Basic.QN):
anno.setanno(node, anno.Basic.QN,
QN(anno.getanno(node.value, anno.Basic.QN),
subscript=subscript))
return node
def resolve(node):
return QnResolver().visit(node)
def from_str(qn_str):
node = parser.parse_expression(qn_str)
node = resolve(node)
return anno.getanno(node, anno.Basic.QN)
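# Hedged usage sketch: parsing a composite qualified name and inspecting
# the simple symbols it statically relies on.
def _example_support_set():
  qn = from_str('a.b[c]')
  return {str(s) for s in qn.support_set}  # {'a', 'c'}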
| apache-2.0 |
adrienbrault/home-assistant | homeassistant/components/rflink/__init__.py | 5 | 20855 | """Support for Rflink devices."""
import asyncio
from collections import defaultdict
import logging
import async_timeout
from rflink.protocol import create_rflink_connection
from serial import SerialException
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_COMMAND,
CONF_DEVICE_ID,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import RestoreEntity
from .utils import brightness_to_rflink
_LOGGER = logging.getLogger(__name__)
ATTR_EVENT = "event"
CONF_ALIASES = "aliases"
CONF_GROUP_ALIASES = "group_aliases"
CONF_GROUP = "group"
CONF_NOGROUP_ALIASES = "nogroup_aliases"
CONF_DEVICE_DEFAULTS = "device_defaults"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_FIRE_EVENT = "fire_event"
CONF_IGNORE_DEVICES = "ignore_devices"
CONF_RECONNECT_INTERVAL = "reconnect_interval"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_WAIT_FOR_ACK = "wait_for_ack"
CONF_KEEPALIVE_IDLE = "tcp_keepalive_idle_timer"
DATA_DEVICE_REGISTER = "rflink_device_register"
DATA_ENTITY_LOOKUP = "rflink_entity_lookup"
DATA_ENTITY_GROUP_LOOKUP = "rflink_entity_group_only_lookup"
DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
DEFAULT_TCP_KEEPALIVE_IDLE_TIMER = 3600
CONNECTION_TIMEOUT = 10
EVENT_BUTTON_PRESSED = "button_pressed"
EVENT_KEY_COMMAND = "command"
EVENT_KEY_ID = "id"
EVENT_KEY_SENSOR = "sensor"
EVENT_KEY_UNIT = "unit"
RFLINK_GROUP_COMMANDS = ["allon", "alloff"]
DOMAIN = "rflink"
SERVICE_SEND_COMMAND = "send_command"
SIGNAL_AVAILABILITY = "rflink_device_available"
SIGNAL_HANDLE_EVENT = "rflink_handle_event_{}"
SIGNAL_EVENT = "rflink_event"
TMP_ENTITY = "tmp.{}"
DEVICE_DEFAULTS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
vol.Optional(
CONF_KEEPALIVE_IDLE, default=DEFAULT_TCP_KEEPALIVE_IDLE_TIMER
): int,
vol.Optional(
CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL
): int,
vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
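# Hedged sketch: a minimal mapping that CONFIG_SCHEMA accepts; the serial
# port path is a placeholder and every omitted option falls back to its
# default.
_EXAMPLE_CONFIG = CONFIG_SCHEMA({DOMAIN: {CONF_PORT: "/dev/ttyACM0"}})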
SEND_COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string}
)
def identify_event_type(event):
"""Look at event to determine type of device.
Async friendly.
"""
if EVENT_KEY_COMMAND in event:
return EVENT_KEY_COMMAND
if EVENT_KEY_SENSOR in event:
return EVENT_KEY_SENSOR
return "unknown"
async def async_setup(hass, config):
"""Set up the Rflink component."""
# Allow entities to register themselves by device_id so they can be
# looked up when new Rflink events arrive to be handled
hass.data[DATA_ENTITY_LOOKUP] = {
EVENT_KEY_COMMAND: defaultdict(list),
EVENT_KEY_SENSOR: defaultdict(list),
}
hass.data[DATA_ENTITY_GROUP_LOOKUP] = {EVENT_KEY_COMMAND: defaultdict(list)}
# Allow platform to specify function to register new unknown devices
hass.data[DATA_DEVICE_REGISTER] = {}
async def async_send_command(call):
"""Send Rflink command."""
_LOGGER.debug("Rflink command for %s", str(call.data))
if not (
await RflinkCommand.send_command(
call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND)
)
):
_LOGGER.error("Failed Rflink command for %s", str(call.data))
else:
async_dispatcher_send(
hass,
SIGNAL_EVENT,
{
EVENT_KEY_ID: call.data.get(CONF_DEVICE_ID),
EVENT_KEY_COMMAND: call.data.get(CONF_COMMAND),
},
)
hass.services.async_register(
DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA
)
@callback
def event_callback(event):
"""Handle incoming Rflink events.
Rflink events arrive as dictionaries of varying content
depending on their type. Identify the events and distribute
accordingly.
"""
event_type = identify_event_type(event)
_LOGGER.debug("event of type %s: %s", event_type, event)
# Don't propagate non entity events (eg: version string, ack response)
if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
_LOGGER.debug("unhandled event of type: %s", event_type)
return
# Lookup entities who registered this device id as device id or alias
event_id = event.get(EVENT_KEY_ID)
is_group_event = (
event_type == EVENT_KEY_COMMAND
and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS
)
if is_group_event:
entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
event_id, []
)
else:
entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
_LOGGER.debug("entity_ids: %s", entity_ids)
if entity_ids:
# Propagate event to every entity matching the device id
for entity in entity_ids:
_LOGGER.debug("passing event to %s", entity)
async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event)
elif not is_group_event:
# If device is not yet known, register with platform (if loaded)
if event_type in hass.data[DATA_DEVICE_REGISTER]:
_LOGGER.debug("device_id not known, adding new device")
# Add bogus event_id first to avoid race if we get another
# event before the device is created
# Any additional events received before the device has been
# created will thus be ignored.
hass.data[DATA_ENTITY_LOOKUP][event_type][event_id].append(
TMP_ENTITY.format(event_id)
)
hass.async_create_task(
hass.data[DATA_DEVICE_REGISTER][event_type](event)
)
else:
_LOGGER.debug("device_id not known and automatic add disabled")
# When connecting to tcp host instead of serial port (optional)
host = config[DOMAIN].get(CONF_HOST)
# TCP port when host configured, otherwise serial port
port = config[DOMAIN][CONF_PORT]
keepalive_idle_timer = None
# TCP KeepAlive only applies to a TCP-based connection (not serial)
if host is not None:
# TCP KEEPALIVE will be enabled if value > 0
keepalive_idle_timer = config[DOMAIN][CONF_KEEPALIVE_IDLE]
if keepalive_idle_timer < 0:
_LOGGER.error(
"A bogus TCP Keepalive IDLE timer was provided (%d secs), "
"it will be disabled. "
"Recommended values: 60-3600 (seconds)",
keepalive_idle_timer,
)
keepalive_idle_timer = None
elif keepalive_idle_timer == 0:
keepalive_idle_timer = None
elif keepalive_idle_timer <= 30:
_LOGGER.warning(
"A very short TCP Keepalive IDLE timer was provided (%d secs) "
"and may produce unexpected disconnections from RFlink device."
" Recommended values: 60-3600 (seconds)",
keepalive_idle_timer,
)
@callback
def reconnect(exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
RflinkCommand.set_rflink_protocol(None)
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
# If HA is not stopping, initiate new connection
if hass.state != CoreState.stopping:
_LOGGER.warning("Disconnected from Rflink, reconnecting")
hass.async_create_task(connect())
async def connect():
"""Set up connection and hook it into HA for reconnect/shutdown."""
_LOGGER.info("Initiating Rflink connection")
# Rflink create_rflink_connection decides based on the value of host
# (string or None) if serial or tcp mode should be used
# Initiate serial/tcp connection to Rflink gateway
connection = create_rflink_connection(
port=port,
host=host,
keepalive=keepalive_idle_timer,
event_callback=event_callback,
disconnect_callback=reconnect,
loop=hass.loop,
ignore=config[DOMAIN][CONF_IGNORE_DEVICES],
)
try:
with async_timeout.timeout(CONNECTION_TIMEOUT):
transport, protocol = await connection
except (
SerialException,
ConnectionRefusedError,
TimeoutError,
OSError,
asyncio.TimeoutError,
) as exc:
reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
_LOGGER.exception(
"Error connecting to Rflink, reconnecting in %s", reconnect_interval
)
# Connection to Rflink device is lost, make entities unavailable
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
hass.loop.call_later(reconnect_interval, reconnect, exc)
return
# There is a valid connection to a Rflink device now so
# mark entities as available
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)
# Bind protocol to command class to allow entities to send commands
RflinkCommand.set_rflink_protocol(protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])
# handle shutdown of Rflink asyncio transport
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()
)
_LOGGER.info("Connected to Rflink")
hass.async_create_task(connect())
async_dispatcher_connect(hass, SIGNAL_EVENT, event_callback)
return True
class RflinkDevice(Entity):
"""Representation of a Rflink device.
Contains the common logic for Rflink entities.
"""
platform = None
_state = None
_available = True
def __init__(
self,
device_id,
initial_event=None,
name=None,
aliases=None,
group=True,
group_aliases=None,
nogroup_aliases=None,
fire_event=False,
signal_repetitions=DEFAULT_SIGNAL_REPETITIONS,
):
"""Initialize the device."""
# Rflink specific attributes for every component type
self._initial_event = initial_event
self._device_id = device_id
if name:
self._name = name
else:
self._name = device_id
self._aliases = aliases
self._group = group
self._group_aliases = group_aliases
self._nogroup_aliases = nogroup_aliases
self._should_fire_event = fire_event
self._signal_repetitions = signal_repetitions
@callback
def handle_event_callback(self, event):
"""Handle incoming event for device type."""
# Call platform specific event handler
self._handle_event(event)
# Propagate changes through ha
self.async_write_ha_state()
# Put command onto bus for user to subscribe to
if self._should_fire_event and identify_event_type(event) == EVENT_KEY_COMMAND:
self.hass.bus.async_fire(
EVENT_BUTTON_PRESSED,
{ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND]},
)
_LOGGER.debug(
"Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]
)
def _handle_event(self, event):
"""Platform specific event handler."""
raise NotImplementedError()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self.assumed_state:
return False
return self._state
@property
def assumed_state(self):
"""Assume device state until first device event sets state."""
return self._state is None
@property
def available(self):
"""Return True if entity is available."""
return self._available
@callback
def _availability_callback(self, availability):
"""Update availability state."""
self._available = availability
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register update callback."""
await super().async_added_to_hass()
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id].append(
self.entity_id
)
if self._group:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].append(self.entity_id)
# aliases respond to both normal and group commands (allon/alloff)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# group_aliases only respond to group commands (allon/alloff)
if self._group_aliases:
for _id in self._group_aliases:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# nogroup_aliases only respond to normal commands
if self._nogroup_aliases:
for _id in self._nogroup_aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
"""Singleton class to make Rflink command interface available to entities.
This class is to be inherited by every Entity class that is actionable
(switches/lights). It exposes the Rflink command interface for these
entities.
The Rflink interface is managed at the class level and set during setup (and
reset on reconnect).
"""
# Keep repetition tasks to cancel if state is changed before repetitions
# are sent
_repetition_task = None
_protocol = None
@classmethod
def set_rflink_protocol(cls, protocol, wait_ack=None):
"""Set the Rflink asyncio protocol as a class variable."""
cls._protocol = protocol
if wait_ack is not None:
cls._wait_ack = wait_ack
@classmethod
def is_connected(cls):
"""Return connection status."""
return bool(cls._protocol)
@classmethod
async def send_command(cls, device_id, action):
"""Send device command to Rflink and wait for acknowledgement."""
return await cls._protocol.send_command_ack(device_id, action)
async def _async_handle_command(self, command, *args):
"""Do bookkeeping for command, send it to rflink and update state."""
self.cancel_queued_send_commands()
if command == "turn_on":
cmd = "on"
self._state = True
elif command == "turn_off":
cmd = "off"
self._state = False
elif command == "dim":
# convert brightness to rflink dim level
cmd = str(brightness_to_rflink(args[0]))
self._state = True
elif command == "toggle":
cmd = "on"
# if the state is unknown or false, it gets set as true
# if the state is true, it gets set as false
self._state = self._state in [None, False]
# Cover options for RFlink
elif command == "close_cover":
cmd = "DOWN"
self._state = False
elif command == "open_cover":
cmd = "UP"
self._state = True
elif command == "stop_cover":
cmd = "STOP"
self._state = True
# Send initial command and queue repetitions.
# This allows the entity state to be updated quickly and not having to
# wait for all repetitions to be sent
await self._async_send_command(cmd, self._signal_repetitions)
# Update state of entity
self.async_write_ha_state()
def cancel_queued_send_commands(self):
"""Cancel queued signal repetition commands.
For example when the user changes state while repetitions are still
queued for broadcast, or when an incoming Rflink command (remote
switch) changes the state.
"""
# cancel any outstanding tasks from the previous state change
if self._repetition_task:
self._repetition_task.cancel()
async def _async_send_command(self, cmd, repetitions):
"""Send a command for device to Rflink gateway."""
_LOGGER.debug("Sending command: %s to Rflink device: %s", cmd, self._device_id)
if not self.is_connected():
raise HomeAssistantError("Cannot send command, not connected!")
if self._wait_ack:
# Puts command on outgoing buffer then waits for Rflink to confirm
# the command has been sent out.
await self._protocol.send_command_ack(self._device_id, cmd)
else:
# Puts command on outgoing buffer and returns straight away.
# Rflink protocol/transport handles asynchronous writing of buffer
# to serial/tcp device. Does not wait for command send
# confirmation.
self._protocol.send_command(self._device_id, cmd)
if repetitions > 1:
self._repetition_task = self.hass.async_create_task(
self._async_send_command(cmd, repetitions - 1)
)
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
"""Rflink entity which can switch on/off (eg: light, switch)."""
async def async_added_to_hass(self):
"""Restore RFLink device state (ON/OFF)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon"]:
self._state = True
elif command in ["off", "alloff"]:
self._state = False
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._async_handle_command("turn_on")
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._async_handle_command("turn_off")
| mit |
slayerjain/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py | 499 | 1557 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# intentionally left blank
| mpl-2.0 |
CodeDJ/qt5-hidpi | qt/qtwebkit/Tools/Scripts/webkitpy/test/finder_unittest.py | 124 | 5471 | # Copyright (C) 2012 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.test.finder import Finder
class FinderTest(unittest.TestCase):
def setUp(self):
files = {
'/foo/bar/baz.py': '',
'/foo/bar/baz_unittest.py': '',
'/foo2/bar2/baz2.py': '',
'/foo2/bar2/baz2.pyc': '',
'/foo2/bar2/baz2_integrationtest.py': '',
'/foo2/bar2/missing.pyc': '',
'/tmp/another_unittest.py': '',
}
self.fs = MockFileSystem(files)
self.finder = Finder(self.fs)
self.finder.add_tree('/foo', 'bar')
self.finder.add_tree('/foo2')
# Here we have to jump through a hoop to make sure test-webkitpy doesn't log
# any messages from these tests :(.
self.root_logger = logging.getLogger()
self.log_levels = []
self.log_handlers = self.root_logger.handlers[:]
for handler in self.log_handlers:
self.log_levels.append(handler.level)
handler.level = logging.CRITICAL
def tearDown(self):
for handler in self.log_handlers:
handler.level = self.log_levels.pop(0)
def test_additional_system_paths(self):
self.assertEqual(self.finder.additional_paths(['/usr']),
['/foo', '/foo2'])
def test_is_module(self):
self.assertTrue(self.finder.is_module('bar.baz'))
self.assertTrue(self.finder.is_module('bar2.baz2'))
self.assertTrue(self.finder.is_module('bar2.baz2_integrationtest'))
# Missing the proper namespace.
self.assertFalse(self.finder.is_module('baz'))
def test_to_module(self):
self.assertEqual(self.finder.to_module('/foo/test.py'), 'test')
self.assertEqual(self.finder.to_module('/foo/bar/test.py'), 'bar.test')
self.assertEqual(self.finder.to_module('/foo/bar/pytest.py'), 'bar.pytest')
def test_clean(self):
self.assertTrue(self.fs.exists('/foo2/bar2/missing.pyc'))
self.finder.clean_trees()
self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))
def check_names(self, names, expected_names, find_all=True):
self.assertEqual(self.finder.find_names(names, find_all), expected_names)
def test_default_names(self):
self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=True)
self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=False)
# Should return the names given it, even if they don't exist.
self.check_names(['foobar'], ['foobar'], find_all=False)
def test_paths(self):
self.fs.chdir('/foo/bar')
self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
self.check_names(['.'], ['bar.baz_unittest'])
self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])
self.fs.chdir('/')
self.check_names(['bar'], ['bar.baz_unittest'])
self.check_names(['/foo/bar/'], ['bar.baz_unittest'])
# This works 'by accident' since it maps onto a package.
self.check_names(['bar/'], ['bar.baz_unittest'])
# This should log an error, since it's outside the trees.
oc = OutputCapture()
oc.set_log_level(logging.ERROR)
oc.capture_output()
try:
self.check_names(['/tmp/another_unittest.py'], [])
finally:
_, _, logs = oc.restore_output()
self.assertIn('another_unittest.py', logs)
# Paths that don't exist are errors.
oc.capture_output()
try:
self.check_names(['/foo/bar/notexist_unittest.py'], [])
finally:
_, _, logs = oc.restore_output()
self.assertIn('notexist_unittest.py', logs)
# Names that don't exist are caught later, at load time.
self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
| lgpl-2.1 |
GIJensen/bitcoin | qa/rpc-tests/test_framework/mininode.py | 5 | 36233 | # mininode.py - Bitcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import binascii
import time
import sys
import random
import cStringIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return chr(len(s)) + s
elif len(s) < 0x10000:
return chr(253) + struct.pack("<H", len(s)) + s
elif len(s) < 0x100000000L:
return chr(254) + struct.pack("<I", len(s)) + s
return chr(255) + struct.pack("<Q", len(s)) + s
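# Hedged round-trip sketch: strings go on the wire as a CompactSize length
# prefix followed by the raw bytes, so deser_string inverts ser_string.
def _example_string_roundtrip():
    payload = "hello"
    wire = ser_string(payload)
    assert deser_string(cStringIO.StringIO(wire)) == payload
    return wire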
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = ""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
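# Hedged sketch: nBits packs a target as one size byte plus a three-byte
# mantissa; 0x1d00ffff is the well-known Bitcoin proof-of-work limit.
def _example_compact_target():
    return uint256_from_compact(0x1d00ffff)  # == 0xffff << 208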
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = "\x00" * 10 + "\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig="", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
binascii.hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].encode('hex_codec')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = ""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
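    # Descriptive note: when a level has an odd number of hashes, i2 == i for
    # the final pair, so the last hash is concatenated with itself -- the same
    # duplication rule Bitcoin uses when computing merkle roots.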
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
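# Illustrative sketch (not part of the original file): assembling a block for a
# test, assuming `tip` is a previous CBlock and `coinbase` a ready CTransaction.
#
#   block = CBlock()
#   block.nTime = tip.nTime + 1
#   block.nBits = tip.nBits
#   block.hashPrevBlock = tip.sha256
#   block.vtx.append(coinbase)
#   block.hashMerkleRoot = block.calc_merkle_root()
#   block.solve()  # grinds nNonce until the hash meets the nBits target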
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = ""
self.strStatusBar = ""
self.strReserved = ""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = ""
self.vchSig = ""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = ""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = "version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = time.time()
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = "verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = "addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = "alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = ""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = "inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = "getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = "getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = "tx"
    def __init__(self, tx=None):
        # avoid a mutable default argument: a shared CTransaction() instance
        # would be silently mutated by deserialize()
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = "block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = "getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = "ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = "ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = "pong"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = "mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = "sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = "getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = "headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = "reject"
def __init__(self):
self.message = ""
self.code = ""
self.reason = ""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.message == "block" or self.message == "tx"):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.message == "block" or self.message == "tx"):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
with mininode_lock:
try:
getattr(self, 'on_' + message.command)(conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
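# Illustrative sketch: tests typically subclass NodeConnCB and override just
# the on_* handlers they care about, e.g. to collect every block received:
#
#   class BlockCollector(NodeConnCB):
#       def __init__(self):
#           NodeConnCB.__init__(self)
#           self.blocks = []
#       def on_block(self, conn, message):
#           self.blocks.append(message.block)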
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping,
"pong": msg_pong,
"headers": msg_headers,
"getheaders": msg_getheaders,
"reject": msg_reject,
"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": "\xf9\xbe\xb4\xd9", # mainnet
"testnet3": "\x0b\x11\x09\x07", # testnet3
"regtest": "\xfa\xbf\xb5\xda" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = cStringIO.StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
            # We check whether to disconnect outside of the asyncore loop,
            # to work around asyncore's behavior when using select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
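# Illustrative wiring (assumed local regtest node and default p2p port):
#
#   cb = NodeConnCB()
#   conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=cb)
#   NetworkThread().start()   # drives mininode_socket_map via asyncore
#   cb.wait_for_verack()      # blocks until the version/verack handshake completes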
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| mit |
TeslaProject/external_chromium_org | tools/cr/cr/commands/select.py | 65 | 1802 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the select command."""
import cr
# The set of variables SELECT writes into the client plugin to control the
# active output directory.
SELECT_OUT_VARS = ['CR_OUT_FULL']
class SelectCommand(cr.Command):
"""The implementation of the select command.
The select command is used to set the default output directory used by all
other commands. It does this by writing out a plugin into the client root
that sets the active output path.
"""
def __init__(self):
super(SelectCommand, self).__init__()
self.help = 'Select an output directory'
self.description = ("""
This makes the specified output directory the default for all future
operations. It also invokes prepare on that directory.
""")
def AddArguments(self, subparsers):
parser = super(SelectCommand, self).AddArguments(subparsers)
self.AddPrepareArguments(parser)
return parser
@classmethod
def AddPrepareArguments(cls, parser):
parser.add_argument(
'--no-prepare', dest='_no_prepare',
action='store_true', default=False,
help='Don\'t prepare the output directory.'
)
def Run(self):
self.Select()
@classmethod
def Select(cls):
"""Performs the select.
This is also called by the init command to auto select the new output
directory.
"""
cr.base.client.WriteConfig(
cr.context.Get('CR_CLIENT_PATH'), dict(
CR_OUT_FULL=cr.context.Get('CR_OUT_FULL')))
cr.base.client.PrintInfo()
# Now we run the post select actions
if not getattr(cr.context.args, '_no_prepare', None):
cr.PrepareCommand.Prepare()
| bsd-3-clause |
marcuskelly/recover | Lib/site-packages/coverage/plugin_support.py | 76 | 7892 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Support for plugins."""
import os
import os.path
import sys
from coverage.misc import CoverageException, isolate_module
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
os = isolate_module(os)
class Plugins(object):
"""The currently loaded collection of coverage.py plugins."""
def __init__(self):
self.order = []
self.names = {}
self.file_tracers = []
self.current_module = None
self.debug = None
@classmethod
def load_plugins(cls, modules, config, debug=None):
"""Load plugins from `modules`.
Returns a list of loaded and configured plugins.
"""
plugins = cls()
plugins.debug = debug
for module in modules:
plugins.current_module = module
__import__(module)
mod = sys.modules[module]
coverage_init = getattr(mod, "coverage_init", None)
if not coverage_init:
raise CoverageException(
"Plugin module %r didn't define a coverage_init function" % module
)
options = config.get_plugin_options(module)
coverage_init(plugins, options)
plugins.current_module = None
return plugins
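    # Illustrative plugin module (hypothetical names) that load_plugins() can
    # consume -- coverage.py imports the module and calls its coverage_init:
    #
    #   # mypkg/myplugin.py
    #   def coverage_init(reg, options):
    #       reg.add_file_tracer(MyFileTracerPlugin(options))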
def add_file_tracer(self, plugin):
"""Add a file tracer plugin.
`plugin` is an instance of a third-party plugin class. It must
implement the :meth:`CoveragePlugin.file_tracer` method.
"""
self._add_plugin(plugin, self.file_tracers)
def add_noop(self, plugin):
"""Add a plugin that does nothing.
This is only useful for testing the plugin support.
"""
self._add_plugin(plugin, None)
def _add_plugin(self, plugin, specialized):
"""Add a plugin object.
`plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
is a list to append the plugin to.
"""
plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__)
if self.debug and self.debug.should('plugin'):
self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin))
labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug)
plugin = DebugPluginWrapper(plugin, labelled)
# pylint: disable=attribute-defined-outside-init
plugin._coverage_plugin_name = plugin_name
plugin._coverage_enabled = True
self.order.append(plugin)
self.names[plugin_name] = plugin
if specialized is not None:
specialized.append(plugin)
def __nonzero__(self):
return bool(self.order)
__bool__ = __nonzero__
def __iter__(self):
return iter(self.order)
def get(self, plugin_name):
"""Return a plugin by name."""
return self.names[plugin_name]
class LabelledDebug(object):
"""A Debug writer, but with labels for prepending to the messages."""
def __init__(self, label, debug, prev_labels=()):
self.labels = list(prev_labels) + [label]
self.debug = debug
def add_label(self, label):
"""Add a label to the writer, and return a new `LabelledDebug`."""
return LabelledDebug(label, self.debug, self.labels)
def message_prefix(self):
"""The prefix to use on messages, combining the labels."""
prefixes = self.labels + ['']
return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
def write(self, message):
"""Write `message`, but with the labels prepended."""
self.debug.write("%s%s" % (self.message_prefix(), message))
class DebugPluginWrapper(CoveragePlugin):
"""Wrap a plugin, and use debug to report on what it's doing."""
def __init__(self, plugin, debug):
super(DebugPluginWrapper, self).__init__()
self.plugin = plugin
self.debug = debug
def file_tracer(self, filename):
tracer = self.plugin.file_tracer(filename)
self.debug.write("file_tracer(%r) --> %r" % (filename, tracer))
if tracer:
debug = self.debug.add_label("file %r" % (filename,))
tracer = DebugFileTracerWrapper(tracer, debug)
return tracer
def file_reporter(self, filename):
reporter = self.plugin.file_reporter(filename)
self.debug.write("file_reporter(%r) --> %r" % (filename, reporter))
if reporter:
debug = self.debug.add_label("file %r" % (filename,))
reporter = DebugFileReporterWrapper(filename, reporter, debug)
return reporter
def sys_info(self):
return self.plugin.sys_info()
class DebugFileTracerWrapper(FileTracer):
"""A debugging `FileTracer`."""
def __init__(self, tracer, debug):
self.tracer = tracer
self.debug = debug
def _show_frame(self, frame):
"""A short string identifying a frame, for debug messages."""
return "%s@%d" % (
os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
)
def source_filename(self):
sfilename = self.tracer.source_filename()
self.debug.write("source_filename() --> %r" % (sfilename,))
return sfilename
def has_dynamic_source_filename(self):
has = self.tracer.has_dynamic_source_filename()
self.debug.write("has_dynamic_source_filename() --> %r" % (has,))
return has
def dynamic_source_filename(self, filename, frame):
dyn = self.tracer.dynamic_source_filename(filename, frame)
self.debug.write("dynamic_source_filename(%r, %s) --> %r" % (
filename, self._show_frame(frame), dyn,
))
return dyn
def line_number_range(self, frame):
pair = self.tracer.line_number_range(frame)
self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair))
return pair
class DebugFileReporterWrapper(FileReporter):
"""A debugging `FileReporter`."""
def __init__(self, filename, reporter, debug):
super(DebugFileReporterWrapper, self).__init__(filename)
self.reporter = reporter
self.debug = debug
def relative_filename(self):
ret = self.reporter.relative_filename()
self.debug.write("relative_filename() --> %r" % (ret,))
return ret
def lines(self):
ret = self.reporter.lines()
self.debug.write("lines() --> %r" % (ret,))
return ret
def excluded_lines(self):
ret = self.reporter.excluded_lines()
self.debug.write("excluded_lines() --> %r" % (ret,))
return ret
def translate_lines(self, lines):
ret = self.reporter.translate_lines(lines)
self.debug.write("translate_lines(%r) --> %r" % (lines, ret))
return ret
def translate_arcs(self, arcs):
ret = self.reporter.translate_arcs(arcs)
self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret))
return ret
def no_branch_lines(self):
ret = self.reporter.no_branch_lines()
self.debug.write("no_branch_lines() --> %r" % (ret,))
return ret
def exit_counts(self):
ret = self.reporter.exit_counts()
self.debug.write("exit_counts() --> %r" % (ret,))
return ret
def arcs(self):
ret = self.reporter.arcs()
self.debug.write("arcs() --> %r" % (ret,))
return ret
def source(self):
ret = self.reporter.source()
self.debug.write("source() --> %d chars" % (len(ret),))
return ret
def source_token_lines(self):
ret = list(self.reporter.source_token_lines())
self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
return ret
| bsd-2-clause |
Jorge-Rodriguez/ansible | lib/ansible/modules/network/aci/aci_interface_policy_port_security.py | 12 | 6440 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_port_security
short_description: Manage port security (l2:PortSecurityPol)
description:
- Manage port security on Cisco ACI fabrics.
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(l2:PortSecurityPol).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
port_security:
description:
- The name of the port security.
type: str
required: yes
aliases: [ name ]
description:
description:
- The description for the contract.
type: str
aliases: [ descr ]
max_end_points:
description:
- Maximum number of end points.
- Accepted values range between C(0) and C(12000).
- The APIC defaults to C(0) when unset during creation.
type: int
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_interface_policy_port_security:
host: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
port_security: '{{ port_security }}'
description: '{{ descr }}'
max_end_points: '{{ max_end_points }}'
delegate_to: localhost
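
# Illustrative only (same assumed host vars): remove the policy again
- aci_interface_policy_port_security:
    host: '{{ inventory_hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    port_security: '{{ port_security }}'
    state: absent
  delegate_to: localhost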
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
port_security=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
max_end_points=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['port_security']],
['state', 'present', ['port_security']],
],
)
port_security = module.params['port_security']
description = module.params['description']
max_end_points = module.params['max_end_points']
if max_end_points is not None and max_end_points not in range(12001):
module.fail_json(msg='The "max_end_points" must be between 0 and 12000')
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='l2PortSecurityPol',
aci_rn='infra/portsecurityP-{0}'.format(port_security),
module_object=port_security,
target_filter={'name': port_security},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='l2PortSecurityPol',
class_config=dict(
name=port_security,
descr=description,
maximum=max_end_points,
),
)
aci.get_diff(aci_class='l2PortSecurityPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
garverp/gnuradio | gr-digital/python/digital/ofdm_packet_utils.py | 27 | 26971 | #
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import struct
import numpy
from gnuradio import gru
import crc
def conv_packed_binary_string_to_1_0_string(s):
"""
'\xAF' --> '10101111'
"""
r = []
for ch in s:
x = ord(ch)
for i in range(7,-1,-1):
t = (x >> i) & 0x1
r.append(t)
return ''.join(map(lambda x: chr(x + ord('0')), r))
def conv_1_0_string_to_packed_binary_string(s):
"""
'10101111' -> ('\xAF', False)
Basically the inverse of conv_packed_binary_string_to_1_0_string,
but also returns a flag indicating if we had to pad with leading zeros
to get to a multiple of 8.
"""
if not is_1_0_string(s):
raise ValueError, "Input must be a string containing only 0's and 1's"
# pad to multiple of 8
padded = False
rem = len(s) % 8
if rem != 0:
npad = 8 - rem
s = '0' * npad + s
padded = True
assert len(s) % 8 == 0
r = []
i = 0
while i < len(s):
t = 0
for j in range(8):
t = (t << 1) | (ord(s[i + j]) - ord('0'))
r.append(chr(t))
i += 8
return (''.join(r), padded)
def is_1_0_string(s):
if not isinstance(s, str):
return False
for ch in s:
if not ch in ('0', '1'):
return False
return True
def string_to_hex_list(s):
return map(lambda x: hex(ord(x)), s)
def whiten(s, o):
sa = numpy.fromstring(s, numpy.uint8)
z = sa ^ random_mask_vec8[o:len(sa)+o]
return z.tostring()
def dewhiten(s, o):
return whiten(s, o) # self inverse
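# Descriptive note: whitening is a bytewise XOR with a fixed PN sequence, so it
# is its own inverse -- for any offset o, whiten(whiten(s, o), o) == s.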
def make_header(payload_len, whitener_offset=0):
# Upper nibble is offset, lower 12 bits is len
val = ((whitener_offset & 0xf) << 12) | (payload_len & 0x0fff)
#print "offset =", whitener_offset, " len =", payload_len, " val=", val
return struct.pack('!HH', val, val)
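# Worked example (illustrative): a 24-byte payload with offset 0 packs as
#   val = (0 << 12) | 24 = 0x0018
# and struct.pack('!HH', val, val) yields '\x00\x18\x00\x18' -- the 16-bit
# value is packed twice, letting the receiver cross-check the header.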
def make_packet(payload, samples_per_symbol, bits_per_symbol,
pad_for_usrp=True, whitener_offset=0, whitening=True):
"""
    Build a packet, given the payload and whitener offset
Args:
payload: packet payload, len [0, 4096]
samples_per_symbol: samples per symbol (needed for padding calculation) (int)
bits_per_symbol: (needed for padding calculation) (int)
whitener_offset: offset into whitener string to use [0-16)
whitening: Turn whitener on or off (bool)
    Packet will have a 4-byte length/offset header at the beginning, followed
    by the (optionally whitened) payload and finally a CRC-32.
"""
    if not (0 <= whitener_offset < 16):
        raise ValueError, "whitener_offset must be between 0 and 15, inclusive (%i)" % (whitener_offset,)
payload_with_crc = crc.gen_and_append_crc32(payload)
#print "outbound crc =", string_to_hex_list(payload_with_crc[-4:])
L = len(payload_with_crc)
MAXLEN = len(random_mask_tuple)
if L > MAXLEN:
raise ValueError, "len(payload) must be in [0, %d]" % (MAXLEN,)
pkt_hd = make_header(L, whitener_offset)
pkt_dt = ''.join((payload_with_crc, '\x55'))
packet_length = len(pkt_hd) + len(pkt_dt)
if pad_for_usrp:
usrp_packing = _npadding_bytes(packet_length, samples_per_symbol, bits_per_symbol) * '\x55'
pkt_dt = pkt_dt + usrp_packing
if(whitening):
pkt = pkt_hd + whiten(pkt_dt, whitener_offset)
else:
pkt = pkt_hd + pkt_dt
#print "make_packet: len(pkt) =", len(pkt)
return pkt
def _npadding_bytes(pkt_byte_len, samples_per_symbol, bits_per_symbol):
"""
Generate sufficient padding such that each packet ultimately ends
up being a multiple of 512 bytes when sent across the USB. We
send 4-byte samples across the USB (16-bit I and 16-bit Q), thus
we want to pad so that after modulation the resulting packet
is a multiple of 128 samples.
Args:
        pkt_byte_len: len in bytes of packet, not including padding.
        samples_per_symbol: samples per symbol (int)
bits_per_symbol: bits per symbol (log2(modulation order)) (int)
Returns:
number of bytes of padding to append.
"""
modulus = 128
byte_modulus = gru.lcm(modulus/8, samples_per_symbol) * bits_per_symbol / samples_per_symbol
r = pkt_byte_len % byte_modulus
if r == 0:
return 0
return byte_modulus - r
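# Worked example (illustrative): with samples_per_symbol=2 and
# bits_per_symbol=1, byte_modulus = lcm(16, 2) * 1 / 2 = 8, so a 30-byte
# packet gets 2 bytes of padding to reach the next multiple of 8.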
def unmake_packet(whitened_payload_with_crc, whitener_offset=0, dewhitening=1):
"""
Return (ok, payload)
Args:
whitened_payload_with_crc: string
whitener_offset: offset into whitener string to use [0-16)
dewhitening: Turn whitener on or off (bool)
"""
if dewhitening:
payload_with_crc = dewhiten(whitened_payload_with_crc, whitener_offset)
else:
payload_with_crc = whitened_payload_with_crc
ok, payload = crc.check_crc32(payload_with_crc)
if 0:
print "payload_with_crc =", string_to_hex_list(payload_with_crc)
print "ok = %r, len(payload) = %d" % (ok, len(payload))
print "payload =", string_to_hex_list(payload)
return ok, payload
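# Illustrative round trip (no USRP padding): the receiver normally slices the
# payload out using the length recovered from the 4-byte header.
#
#   pkt = make_packet("hello", 2, 1, pad_for_usrp=False)
#   L = len("hello") + 4                     # payload plus CRC-32
#   ok, payload = unmake_packet(pkt[4:4+L])  # ok == True, payload == "hello"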
# FYI, this PN code is the output of a 15-bit LFSR
random_mask_tuple = (
255, 63, 0, 16, 0, 12, 0, 5, 192, 3, 16, 1, 204, 0, 85, 192,
63, 16, 16, 12, 12, 5, 197, 195, 19, 17, 205, 204, 85, 149, 255, 47,
0, 28, 0, 9, 192, 6, 208, 2, 220, 1, 153, 192, 106, 208, 47, 28,
28, 9, 201, 198, 214, 210, 222, 221, 152, 89, 170, 186, 255, 51, 0, 21,
192, 15, 16, 4, 12, 3, 69, 193, 243, 16, 69, 204, 51, 21, 213, 207,
31, 20, 8, 15, 70, 132, 50, 227, 85, 137, 255, 38, 192, 26, 208, 11,
28, 7, 73, 194, 182, 209, 182, 220, 118, 217, 230, 218, 202, 219, 23, 27,
78, 139, 116, 103, 103, 106, 170, 175, 63, 60, 16, 17, 204, 12, 85, 197,
255, 19, 0, 13, 192, 5, 144, 3, 44, 1, 221, 192, 89, 144, 58, 236,
19, 13, 205, 197, 149, 147, 47, 45, 220, 29, 153, 201, 170, 214, 255, 30,
192, 8, 80, 6, 188, 2, 241, 193, 132, 80, 99, 124, 41, 225, 222, 200,
88, 86, 186, 190, 243, 48, 69, 212, 51, 31, 85, 200, 63, 22, 144, 14,
236, 4, 77, 195, 117, 145, 231, 44, 74, 157, 247, 41, 134, 158, 226, 232,
73, 142, 182, 228, 118, 203, 102, 215, 106, 222, 175, 24, 124, 10, 161, 199,
56, 82, 146, 189, 173, 177, 189, 180, 113, 183, 100, 118, 171, 102, 255, 106,
192, 47, 16, 28, 12, 9, 197, 198, 211, 18, 221, 205, 153, 149, 170, 239,
63, 12, 16, 5, 204, 3, 21, 193, 207, 16, 84, 12, 63, 69, 208, 51,
28, 21, 201, 207, 22, 212, 14, 223, 68, 88, 51, 122, 149, 227, 47, 9,
220, 6, 217, 194, 218, 209, 155, 28, 107, 73, 239, 118, 204, 38, 213, 218,
223, 27, 24, 11, 74, 135, 119, 34, 166, 153, 186, 234, 243, 15, 5, 196,
3, 19, 65, 205, 240, 85, 132, 63, 35, 80, 25, 252, 10, 193, 199, 16,
82, 140, 61, 165, 209, 187, 28, 115, 73, 229, 246, 203, 6, 215, 66, 222,
177, 152, 116, 106, 167, 111, 58, 172, 19, 61, 205, 209, 149, 156, 111, 41,
236, 30, 205, 200, 85, 150, 191, 46, 240, 28, 68, 9, 243, 70, 197, 242,
211, 5, 157, 195, 41, 145, 222, 236, 88, 77, 250, 181, 131, 55, 33, 214,
152, 94, 234, 184, 79, 50, 180, 21, 183, 79, 54, 180, 22, 247, 78, 198,
180, 82, 247, 125, 134, 161, 162, 248, 121, 130, 162, 225, 185, 136, 114, 230,
165, 138, 251, 39, 3, 90, 129, 251, 32, 67, 88, 49, 250, 148, 67, 47,
113, 220, 36, 89, 219, 122, 219, 99, 27, 105, 203, 110, 215, 108, 94, 173,
248, 125, 130, 161, 161, 184, 120, 114, 162, 165, 185, 187, 50, 243, 85, 133,
255, 35, 0, 25, 192, 10, 208, 7, 28, 2, 137, 193, 166, 208, 122, 220,
35, 25, 217, 202, 218, 215, 27, 30, 139, 72, 103, 118, 170, 166, 255, 58,
192, 19, 16, 13, 204, 5, 149, 195, 47, 17, 220, 12, 89, 197, 250, 211,
3, 29, 193, 201, 144, 86, 236, 62, 205, 208, 85, 156, 63, 41, 208, 30,
220, 8, 89, 198, 186, 210, 243, 29, 133, 201, 163, 22, 249, 206, 194, 212,
81, 159, 124, 104, 33, 238, 152, 76, 106, 181, 239, 55, 12, 22, 133, 206,
227, 20, 73, 207, 118, 212, 38, 223, 90, 216, 59, 26, 147, 75, 45, 247,
93, 134, 185, 162, 242, 249, 133, 130, 227, 33, 137, 216, 102, 218, 170, 219,
63, 27, 80, 11, 124, 7, 97, 194, 168, 81, 190, 188, 112, 113, 228, 36,
75, 91, 119, 123, 102, 163, 106, 249, 239, 2, 204, 1, 149, 192, 111, 16,
44, 12, 29, 197, 201, 147, 22, 237, 206, 205, 148, 85, 175, 127, 60, 32,
17, 216, 12, 90, 133, 251, 35, 3, 89, 193, 250, 208, 67, 28, 49, 201,
212, 86, 223, 126, 216, 32, 90, 152, 59, 42, 147, 95, 45, 248, 29, 130,
137, 161, 166, 248, 122, 194, 163, 17, 185, 204, 114, 213, 229, 159, 11, 40,
7, 94, 130, 184, 97, 178, 168, 117, 190, 167, 48, 122, 148, 35, 47, 89,
220, 58, 217, 211, 26, 221, 203, 25, 151, 74, 238, 183, 12, 118, 133, 230,
227, 10, 201, 199, 22, 210, 142, 221, 164, 89, 187, 122, 243, 99, 5, 233,
195, 14, 209, 196, 92, 83, 121, 253, 226, 193, 137, 144, 102, 236, 42, 205,
223, 21, 152, 15, 42, 132, 31, 35, 72, 25, 246, 138, 198, 231, 18, 202,
141, 151, 37, 174, 155, 60, 107, 81, 239, 124, 76, 33, 245, 216, 71, 26,
178, 139, 53, 167, 87, 58, 190, 147, 48, 109, 212, 45, 159, 93, 168, 57,
190, 146, 240, 109, 132, 45, 163, 93, 185, 249, 178, 194, 245, 145, 135, 44,
98, 157, 233, 169, 142, 254, 228, 64, 75, 112, 55, 100, 22, 171, 78, 255,
116, 64, 39, 112, 26, 164, 11, 59, 71, 83, 114, 189, 229, 177, 139, 52,
103, 87, 106, 190, 175, 48, 124, 20, 33, 207, 88, 84, 58, 191, 83, 48,
61, 212, 17, 159, 76, 104, 53, 238, 151, 12, 110, 133, 236, 99, 13, 233,
197, 142, 211, 36, 93, 219, 121, 155, 98, 235, 105, 143, 110, 228, 44, 75,
93, 247, 121, 134, 162, 226, 249, 137, 130, 230, 225, 138, 200, 103, 22, 170,
142, 255, 36, 64, 27, 112, 11, 100, 7, 107, 66, 175, 113, 188, 36, 113,
219, 100, 91, 107, 123, 111, 99, 108, 41, 237, 222, 205, 152, 85, 170, 191,
63, 48, 16, 20, 12, 15, 69, 196, 51, 19, 85, 205, 255, 21, 128, 15,
32, 4, 24, 3, 74, 129, 247, 32, 70, 152, 50, 234, 149, 143, 47, 36,
28, 27, 73, 203, 118, 215, 102, 222, 170, 216, 127, 26, 160, 11, 56, 7,
82, 130, 189, 161, 177, 184, 116, 114, 167, 101, 186, 171, 51, 63, 85, 208,
63, 28, 16, 9, 204, 6, 213, 194, 223, 17, 152, 12, 106, 133, 239, 35,
12, 25, 197, 202, 211, 23, 29, 206, 137, 148, 102, 239, 106, 204, 47, 21,
220, 15, 25, 196, 10, 211, 71, 29, 242, 137, 133, 166, 227, 58, 201, 211,
22, 221, 206, 217, 148, 90, 239, 123, 12, 35, 69, 217, 243, 26, 197, 203,
19, 23, 77, 206, 181, 148, 119, 47, 102, 156, 42, 233, 223, 14, 216, 4,
90, 131, 123, 33, 227, 88, 73, 250, 182, 195, 54, 209, 214, 220, 94, 217,
248, 90, 194, 187, 17, 179, 76, 117, 245, 231, 7, 10, 130, 135, 33, 162,
152, 121, 170, 162, 255, 57, 128, 18, 224, 13, 136, 5, 166, 131, 58, 225,
211, 8, 93, 198, 185, 146, 242, 237, 133, 141, 163, 37, 185, 219, 50, 219,
85, 155, 127, 43, 96, 31, 104, 8, 46, 134, 156, 98, 233, 233, 142, 206,
228, 84, 75, 127, 119, 96, 38, 168, 26, 254, 139, 0, 103, 64, 42, 176,
31, 52, 8, 23, 70, 142, 178, 228, 117, 139, 103, 39, 106, 154, 175, 43,
60, 31, 81, 200, 60, 86, 145, 254, 236, 64, 77, 240, 53, 132, 23, 35,
78, 153, 244, 106, 199, 111, 18, 172, 13, 189, 197, 177, 147, 52, 109, 215,
109, 158, 173, 168, 125, 190, 161, 176, 120, 116, 34, 167, 89, 186, 186, 243,
51, 5, 213, 195, 31, 17, 200, 12, 86, 133, 254, 227, 0, 73, 192, 54,
208, 22, 220, 14, 217, 196, 90, 211, 123, 29, 227, 73, 137, 246, 230, 198,
202, 210, 215, 29, 158, 137, 168, 102, 254, 170, 192, 127, 16, 32, 12, 24,
5, 202, 131, 23, 33, 206, 152, 84, 106, 191, 111, 48, 44, 20, 29, 207,
73, 148, 54, 239, 86, 204, 62, 213, 208, 95, 28, 56, 9, 210, 134, 221,
162, 217, 185, 154, 242, 235, 5, 143, 67, 36, 49, 219, 84, 91, 127, 123,
96, 35, 104, 25, 238, 138, 204, 103, 21, 234, 143, 15, 36, 4, 27, 67,
75, 113, 247, 100, 70, 171, 114, 255, 101, 128, 43, 32, 31, 88, 8, 58,
134, 147, 34, 237, 217, 141, 154, 229, 171, 11, 63, 71, 80, 50, 188, 21,
177, 207, 52, 84, 23, 127, 78, 160, 52, 120, 23, 98, 142, 169, 164, 126,
251, 96, 67, 104, 49, 238, 148, 76, 111, 117, 236, 39, 13, 218, 133, 155,
35, 43, 89, 223, 122, 216, 35, 26, 153, 203, 42, 215, 95, 30, 184, 8,
114, 134, 165, 162, 251, 57, 131, 82, 225, 253, 136, 65, 166, 176, 122, 244,
35, 7, 89, 194, 186, 209, 179, 28, 117, 201, 231, 22, 202, 142, 215, 36,
94, 155, 120, 107, 98, 175, 105, 188, 46, 241, 220, 68, 89, 243, 122, 197,
227, 19, 9, 205, 198, 213, 146, 223, 45, 152, 29, 170, 137, 191, 38, 240,
26, 196, 11, 19, 71, 77, 242, 181, 133, 183, 35, 54, 153, 214, 234, 222,
207, 24, 84, 10, 191, 71, 48, 50, 148, 21, 175, 79, 60, 52, 17, 215,
76, 94, 181, 248, 119, 2, 166, 129, 186, 224, 115, 8, 37, 198, 155, 18,
235, 77, 143, 117, 164, 39, 59, 90, 147, 123, 45, 227, 93, 137, 249, 166,
194, 250, 209, 131, 28, 97, 201, 232, 86, 206, 190, 212, 112, 95, 100, 56,
43, 82, 159, 125, 168, 33, 190, 152, 112, 106, 164, 47, 59, 92, 19, 121,
205, 226, 213, 137, 159, 38, 232, 26, 206, 139, 20, 103, 79, 106, 180, 47,
55, 92, 22, 185, 206, 242, 212, 69, 159, 115, 40, 37, 222, 155, 24, 107,
74, 175, 119, 60, 38, 145, 218, 236, 91, 13, 251, 69, 131, 115, 33, 229,
216, 75, 26, 183, 75, 54, 183, 86, 246, 190, 198, 240, 82, 196, 61, 147,
81, 173, 252, 125, 129, 225, 160, 72, 120, 54, 162, 150, 249, 174, 194, 252,
81, 129, 252, 96, 65, 232, 48, 78, 148, 52, 111, 87, 108, 62, 173, 208,
125, 156, 33, 169, 216, 126, 218, 160, 91, 56, 59, 82, 147, 125, 173, 225,
189, 136, 113, 166, 164, 122, 251, 99, 3, 105, 193, 238, 208, 76, 92, 53,
249, 215, 2, 222, 129, 152, 96, 106, 168, 47, 62, 156, 16, 105, 204, 46,
213, 220, 95, 25, 248, 10, 194, 135, 17, 162, 140, 121, 165, 226, 251, 9,
131, 70, 225, 242, 200, 69, 150, 179, 46, 245, 220, 71, 25, 242, 138, 197,
167, 19, 58, 141, 211, 37, 157, 219, 41, 155, 94, 235, 120, 79, 98, 180,
41, 183, 94, 246, 184, 70, 242, 178, 197, 181, 147, 55, 45, 214, 157, 158,
233, 168, 78, 254, 180, 64, 119, 112, 38, 164, 26, 251, 75, 3, 119, 65,
230, 176, 74, 244, 55, 7, 86, 130, 190, 225, 176, 72, 116, 54, 167, 86,
250, 190, 195, 48, 81, 212, 60, 95, 81, 248, 60, 66, 145, 241, 172, 68,
125, 243, 97, 133, 232, 99, 14, 169, 196, 126, 211, 96, 93, 232, 57, 142,
146, 228, 109, 139, 109, 167, 109, 186, 173, 179, 61, 181, 209, 183, 28, 118,
137, 230, 230, 202, 202, 215, 23, 30, 142, 136, 100, 102, 171, 106, 255, 111,
0, 44, 0, 29, 192, 9, 144, 6, 236, 2, 205, 193, 149, 144, 111, 44,
44, 29, 221, 201, 153, 150, 234, 238, 207, 12, 84, 5, 255, 67, 0, 49,
192, 20, 80, 15, 124, 4, 33, 195, 88, 81, 250, 188, 67, 49, 241, 212,
68, 95, 115, 120, 37, 226, 155, 9, 171, 70, 255, 114, 192, 37, 144, 27,
44, 11, 93, 199, 121, 146, 162, 237, 185, 141, 178, 229, 181, 139, 55, 39,
86, 154, 190, 235, 48, 79, 84, 52, 63, 87, 80, 62, 188, 16, 113, 204,
36, 85, 219, 127, 27, 96, 11, 104, 7, 110, 130, 172, 97, 189, 232, 113,
142, 164, 100, 123, 107, 99, 111, 105, 236, 46, 205, 220, 85, 153, 255, 42,
192, 31, 16, 8, 12, 6, 133, 194, 227, 17, 137, 204, 102, 213, 234, 223,
15, 24, 4, 10, 131, 71, 33, 242, 152, 69, 170, 179, 63, 53, 208, 23,
28, 14, 137, 196, 102, 211, 106, 221, 239, 25, 140, 10, 229, 199, 11, 18,
135, 77, 162, 181, 185, 183, 50, 246, 149, 134, 239, 34, 204, 25, 149, 202,
239, 23, 12, 14, 133, 196, 99, 19, 105, 205, 238, 213, 140, 95, 37, 248,
27, 2, 139, 65, 167, 112, 122, 164, 35, 59, 89, 211, 122, 221, 227, 25,
137, 202, 230, 215, 10, 222, 135, 24, 98, 138, 169, 167, 62, 250, 144, 67,
44, 49, 221, 212, 89, 159, 122, 232, 35, 14, 153, 196, 106, 211, 111, 29,
236, 9, 141, 198, 229, 146, 203, 45, 151, 93, 174, 185, 188, 114, 241, 229,
132, 75, 35, 119, 89, 230, 186, 202, 243, 23, 5, 206, 131, 20, 97, 207,
104, 84, 46, 191, 92, 112, 57, 228, 18, 203, 77, 151, 117, 174, 167, 60,
122, 145, 227, 44, 73, 221, 246, 217, 134, 218, 226, 219, 9, 155, 70, 235,
114, 207, 101, 148, 43, 47, 95, 92, 56, 57, 210, 146, 221, 173, 153, 189,
170, 241, 191, 4, 112, 3, 100, 1, 235, 64, 79, 112, 52, 36, 23, 91,
78, 187, 116, 115, 103, 101, 234, 171, 15, 63, 68, 16, 51, 76, 21, 245,
207, 7, 20, 2, 143, 65, 164, 48, 123, 84, 35, 127, 89, 224, 58, 200,
19, 22, 141, 206, 229, 148, 75, 47, 119, 92, 38, 185, 218, 242, 219, 5,
155, 67, 43, 113, 223, 100, 88, 43, 122, 159, 99, 40, 41, 222, 158, 216,
104, 90, 174, 187, 60, 115, 81, 229, 252, 75, 1, 247, 64, 70, 176, 50,
244, 21, 135, 79, 34, 180, 25, 183, 74, 246, 183, 6, 246, 130, 198, 225,
146, 200, 109, 150, 173, 174, 253, 188, 65, 177, 240, 116, 68, 39, 115, 90,
165, 251, 59, 3, 83, 65, 253, 240, 65, 132, 48, 99, 84, 41, 255, 94,
192, 56, 80, 18, 188, 13, 177, 197, 180, 83, 55, 125, 214, 161, 158, 248,
104, 66, 174, 177, 188, 116, 113, 231, 100, 74, 171, 119, 63, 102, 144, 42,
236, 31, 13, 200, 5, 150, 131, 46, 225, 220, 72, 89, 246, 186, 198, 243,
18, 197, 205, 147, 21, 173, 207, 61, 148, 17, 175, 76, 124, 53, 225, 215,
8, 94, 134, 184, 98, 242, 169, 133, 190, 227, 48, 73, 212, 54, 223, 86,
216, 62, 218, 144, 91, 44, 59, 93, 211, 121, 157, 226, 233, 137, 142, 230,
228, 74, 203, 119, 23, 102, 142, 170, 228, 127, 11, 96, 7, 104, 2, 174,
129, 188, 96, 113, 232, 36, 78, 155, 116, 107, 103, 111, 106, 172, 47, 61,
220, 17, 153, 204, 106, 213, 239, 31, 12, 8, 5, 198, 131, 18, 225, 205,
136, 85, 166, 191, 58, 240, 19, 4, 13, 195, 69, 145, 243, 44, 69, 221,
243, 25, 133, 202, 227, 23, 9, 206, 134, 212, 98, 223, 105, 152, 46, 234,
156, 79, 41, 244, 30, 199, 72, 82, 182, 189, 182, 241, 182, 196, 118, 211,
102, 221, 234, 217, 143, 26, 228, 11, 11, 71, 71, 114, 178, 165, 181, 187,
55, 51, 86, 149, 254, 239, 0, 76, 0, 53, 192, 23, 16, 14, 140, 4,
101, 195, 107, 17, 239, 76, 76, 53, 245, 215, 7, 30, 130, 136, 97, 166,
168, 122, 254, 163, 0, 121, 192, 34, 208, 25, 156, 10, 233, 199, 14, 210,
132, 93, 163, 121, 185, 226, 242, 201, 133, 150, 227, 46, 201, 220, 86, 217,
254, 218, 192, 91, 16, 59, 76, 19, 117, 205, 231, 21, 138, 143, 39, 36,
26, 155, 75, 43, 119, 95, 102, 184, 42, 242, 159, 5, 168, 3, 62, 129,
208, 96, 92, 40, 57, 222, 146, 216, 109, 154, 173, 171, 61, 191, 81, 176,
60, 116, 17, 231, 76, 74, 181, 247, 55, 6, 150, 130, 238, 225, 140, 72,
101, 246, 171, 6, 255, 66, 192, 49, 144, 20, 108, 15, 109, 196, 45, 147,
93, 173, 249, 189, 130, 241, 161, 132, 120, 99, 98, 169, 233, 190, 206, 240,
84, 68, 63, 115, 80, 37, 252, 27, 1, 203, 64, 87, 112, 62, 164, 16,
123, 76, 35, 117, 217, 231, 26, 202, 139, 23, 39, 78, 154, 180, 107, 55,
111, 86, 172, 62, 253, 208, 65, 156, 48, 105, 212, 46, 223, 92, 88, 57,
250, 146, 195, 45, 145, 221, 172, 89, 189, 250, 241, 131, 4, 97, 195, 104,
81, 238, 188, 76, 113, 245, 228, 71, 11, 114, 135, 101, 162, 171, 57, 191,
82, 240, 61, 132, 17, 163, 76, 121, 245, 226, 199, 9, 146, 134, 237, 162,
205, 185, 149, 178, 239, 53, 140, 23, 37, 206, 155, 20, 107, 79, 111, 116,
44, 39, 93, 218, 185, 155, 50, 235, 85, 143, 127, 36, 32, 27, 88, 11,
122, 135, 99, 34, 169, 217, 190, 218, 240, 91, 4, 59, 67, 83, 113, 253,
228, 65, 139, 112, 103, 100, 42, 171, 95, 63, 120, 16, 34, 140, 25, 165,
202, 251, 23, 3, 78, 129, 244, 96, 71, 104, 50, 174, 149, 188, 111, 49,
236, 20, 77, 207, 117, 148, 39, 47, 90, 156, 59, 41, 211, 94, 221, 248,
89, 130, 186, 225, 179, 8, 117, 198, 167, 18, 250, 141, 131, 37, 161, 219,
56, 91, 82, 187, 125, 179, 97, 181, 232, 119, 14, 166, 132, 122, 227, 99,
9, 233, 198, 206, 210, 212, 93, 159, 121, 168, 34, 254, 153, 128, 106, 224,
47, 8, 28, 6, 137, 194, 230, 209, 138, 220, 103, 25, 234, 138, 207, 39,
20, 26, 143, 75, 36, 55, 91, 86, 187, 126, 243, 96, 69, 232, 51, 14,
149, 196, 111, 19, 108, 13, 237, 197, 141, 147, 37, 173, 219, 61, 155, 81,
171, 124, 127, 97, 224, 40, 72, 30, 182, 136, 118, 230, 166, 202, 250, 215,
3, 30, 129, 200, 96, 86, 168, 62, 254, 144, 64, 108, 48, 45, 212, 29,
159, 73, 168, 54, 254, 150, 192, 110, 208, 44, 92, 29, 249, 201, 130, 214,
225, 158, 200, 104, 86, 174, 190, 252, 112, 65, 228, 48, 75, 84, 55, 127,
86, 160, 62, 248, 16, 66, 140, 49, 165, 212, 123, 31, 99, 72, 41, 246,
158, 198, 232, 82, 206, 189, 148, 113, 175, 100, 124, 43, 97, 223, 104, 88,
46, 186, 156, 115, 41, 229, 222, 203, 24, 87, 74, 190, 183, 48, 118, 148,
38, 239, 90, 204, 59, 21, 211, 79, 29, 244, 9, 135, 70, 226, 178, 201,
181, 150, 247, 46, 198, 156, 82, 233, 253, 142, 193, 164, 80, 123, 124, 35,
97, 217, 232, 90, 206, 187, 20, 115, 79, 101, 244, 43, 7, 95, 66, 184,
49, 178, 148, 117, 175, 103, 60, 42, 145, 223, 44, 88, 29, 250, 137, 131,
38, 225, 218, 200, 91, 22, 187, 78, 243, 116, 69, 231, 115, 10, 165, 199,
59, 18, 147, 77, 173, 245, 189, 135, 49, 162, 148, 121, 175, 98, 252, 41,
129, 222, 224, 88, 72, 58, 182, 147, 54, 237, 214, 205, 158, 213, 168, 95,
62, 184, 16, 114, 140, 37, 165, 219, 59, 27, 83, 75, 125, 247, 97, 134,
168, 98, 254, 169, 128, 126, 224, 32, 72, 24, 54, 138, 150, 231, 46, 202,
156, 87, 41, 254, 158, 192, 104, 80, 46, 188, 28, 113, 201, 228, 86, 203,
126, 215, 96, 94, 168, 56, 126, 146, 160, 109, 184, 45, 178, 157, 181, 169,
183, 62, 246, 144, 70, 236, 50, 205, 213, 149, 159, 47, 40, 28, 30, 137,
200, 102, 214, 170, 222, 255, 24, 64, 10, 176, 7, 52, 2, 151, 65, 174,
176, 124, 116, 33, 231, 88, 74, 186, 183, 51, 54, 149, 214, 239, 30, 204,
8, 85, 198, 191, 18, 240, 13, 132, 5, 163, 67, 57, 241, 210, 196, 93,
147, 121, 173, 226, 253, 137, 129, 166, 224, 122, 200, 35, 22, 153, 206, 234,
212, 79, 31, 116, 8, 39, 70, 154, 178, 235, 53, 143, 87, 36, 62, 155,
80, 107, 124, 47, 97, 220, 40, 89, 222, 186, 216, 115, 26, 165, 203, 59,
23, 83, 78, 189, 244, 113, 135, 100, 98, 171, 105, 191, 110, 240, 44, 68,
29, 243, 73, 133, 246, 227, 6, 201, 194, 214, 209, 158, 220, 104, 89, 238,
186, 204, 115, 21, 229, 207, 11, 20, 7, 79, 66, 180, 49, 183, 84, 118,
191, 102, 240, 42, 196, 31, 19, 72, 13, 246, 133, 134, 227, 34, 201, 217,
150, 218, 238, 219, 12, 91, 69, 251, 115, 3, 101, 193, 235, 16, 79, 76,
52, 53, 215, 87, 30, 190, 136, 112, 102, 164, 42, 251, 95, 3, 120, 1,
226, 128, 73, 160, 54, 248, 22, 194, 142, 209, 164, 92, 123, 121, 227, 98,
201, 233, 150, 206, 238, 212, 76, 95, 117, 248, 39, 2, 154, 129, 171, 32,
127, 88, 32, 58, 152, 19, 42, 141, 223, 37, 152, 27, 42, 139, 95, 39,
120, 26, 162, 139, 57, 167, 82, 250, 189, 131, 49, 161, 212, 120, 95, 98,
184, 41, 178, 158, 245, 168, 71, 62, 178, 144, 117, 172, 39, 61, 218, 145,
155, 44, 107, 93, 239, 121, 140, 34, 229, 217, 139, 26, 231, 75, 10, 183,
71, 54, 178, 150, 245, 174, 199, 60, 82, 145, 253, 172, 65, 189, 240, 113,
132, 36, 99, 91, 105, 251, 110, 195, 108, 81, 237, 252, 77, 129, 245, 160,
71, 56, 50, 146, 149, 173, 175, 61, 188, 17, 177, 204, 116, 85, 231, 127,
10, 160, 7, 56, 2, 146, 129, 173, 160, 125, 184, 33, 178, 152, 117, 170,
167, 63, 58, 144, 19, 44, 13, 221, 197, 153, 147, 42, 237, 223, 13, 152,
5, 170, 131, 63, 33, 208, 24, 92, 10, 185, 199, 50, 210, 149, 157, 175,
41, 188, 30, 241, 200, 68, 86, 179, 126, 245, 224, 71, 8, 50, 134, 149,
162, 239, 57, 140, 18, 229, 205, 139, 21, 167, 79, 58, 180, 19, 55, 77,
214, 181, 158, 247, 40, 70, 158, 178, 232, 117, 142, 167, 36, 122, 155, 99,
43, 105, 223, 110, 216, 44, 90, 157, 251, 41, 131, 94, 225, 248, 72, 66,
182, 177, 182, 244, 118, 199, 102, 210, 170, 221, 191, 25, 176, 10, 244, 7,
7, 66, 130, 177, 161, 180, 120, 119, 98, 166, 169, 186, 254, 243, 0, 69,
192, 51, 16, 21, 204, 15, 21, 196, 15, 19, 68, 13, 243, 69, 133, 243,
35, 5, 217, 195, 26, 209, 203, 28, 87, 73, 254, 182, 192, 118, 208, 38,
220, 26, 217, 203, 26, 215, 75, 30, 183, 72, 118, 182, 166, 246, 250, 198,
195, 18, 209, 205, 156, 85, 169, 255, 62, 192, 16, 80, 12, 60, 5, 209,
195, 28, 81, 201, 252, 86, 193, 254, 208, 64, 92, 48, 57, 212, 18, 223,
77, 152, 53, 170, 151, 63, 46, 144, 28, 108, 9, 237, 198, 205, 146, 213,
173, 159, 61, 168, 17, 190, 140, 112, 101, 228, 43, 11, 95, 71, 120, 50,
162, 149, 185, 175, 50, 252, 21, 129, 207, 32, 84, 24, 63, 74, 144, 55,
44, 22, 157, 206, 233, 148, 78, 239, 116, 76, 39, 117, 218, 167, 27, 58,
139, 83, 39, 125, 218, 161, 155, 56, 107, 82, 175, 125, 188, 33, 177, 216,
116, 90, 167, 123, 58, 163, 83, 57, 253, 210, 193, 157, 144, 105, 172, 46,
253, 220, 65, 153, 240, 106, 196, 47, 19, 92, 13, 249, 197, 130, 211, 33,
157, 216, 105, 154, 174, 235, 60, 79, 81, 244, 60, 71, 81, 242, 188, 69,
177, 243, 52, 69, 215, 115, 30, 165, 200, 123, 22, 163, 78, 249, 244, 66,
199, 113, 146, 164, 109, 187, 109, 179, 109, 181, 237, 183, 13, 182, 133, 182,
227, 54, 201, 214, 214, 222, 222, 216, 88, 90, 186, 187, 51, 51, 255, 63 )
random_mask_vec8 = numpy.array(random_mask_tuple, numpy.uint8)
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/storage/infinidat/infini_export_client.py | 43 | 5489 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_export_client
version_added: 2.3
short_description: Create, Delete or Modify NFS Client(s) for existing exports on Infinibox
description:
  - This module creates, deletes or modifies NFS client(s) for existing exports on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
client:
description:
- Client IP or Range. Ranges can be defined as follows
192.168.0.1-192.168.0.254.
aliases: ['name']
required: true
state:
description:
- Creates/Modifies client when present and removes when absent.
required: false
default: "present"
choices: [ "present", "absent" ]
access_mode:
description:
- Read Write or Read Only Access.
choices: [ "RW", "RO" ]
default: RW
required: false
no_root_squash:
description:
- Don't squash root user to anonymous. Will be set to "no" on creation if not specified explicitly.
type: bool
default: no
required: false
export:
description:
- Name of the export.
required: true
extends_documentation_fragment:
- infinibox
requirements:
- munch
'''
EXAMPLES = '''
- name: Make sure nfs client 10.0.0.1 is configured for export. Allow root access
infini_export_client:
client: 10.0.0.1
access_mode: RW
no_root_squash: yes
export: /data
user: admin
password: secret
system: ibox001
- name: Add multiple clients with RO access. Squash root privileges
infini_export_client:
client: "{{ item }}"
access_mode: RO
no_root_squash: no
export: /data
user: admin
password: secret
system: ibox001
with_items:
- 10.0.0.2
- 10.0.0.3
'''
RETURN = '''
'''
try:
from munch import Munch, unmunchify
HAS_MUNCH = True
except ImportError:
HAS_MUNCH = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.infinibox import HAS_INFINISDK, api_wrapper, get_system, infinibox_argument_spec
def transform(d):
return frozenset(d.items())
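# for illustration: transform({'a': 1, 'b': 2}) == frozenset([('a', 1), ('b', 2)]),
# i.e. a hashable snapshot of a dict's items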
@api_wrapper
def get_export(module, system):
"""Retrun export if found. Fail module if not found"""
try:
export = system.exports.get(export_path=module.params['export'])
except Exception:
module.fail_json(msg="Export with export path {} not found".format(module.params['export']))
return export
@api_wrapper
def update_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
access_mode = module.params['access_mode']
no_root_squash = module.params['no_root_squash']
client_list = export.get_permissions()
client_not_in_list = True
for index, item in enumerate(client_list):
if item.client == client:
client_not_in_list = False
if item.access != access_mode:
item.access = access_mode
changed = True
if item.no_root_squash is not no_root_squash:
item.no_root_squash = no_root_squash
changed = True
# If access_mode and/or no_root_squash not passed as arguments to the module,
# use access_mode with RW value and set no_root_squash to False
if client_not_in_list:
changed = True
client_list.append(Munch(client=client, access=access_mode, no_root_squash=no_root_squash))
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
@api_wrapper
def delete_client(module, export):
"""Update export client list"""
changed = False
client = module.params['client']
client_list = export.get_permissions()
for index, item in enumerate(client_list):
if item.client == client:
changed = True
# each client appears at most once in the permissions list; stop after
# removing it, since deleting while iterating would skip the next element
del client_list[index]
break
if changed:
for index, item in enumerate(client_list):
client_list[index] = unmunchify(item)
if not module.check_mode:
export.update_permissions(client_list)
module.exit_json(changed=changed)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
client=dict(required=True),
access_mode=dict(choices=['RO', 'RW'], default='RW'),
no_root_squash=dict(type='bool', default='no'),
state=dict(default='present', choices=['present', 'absent']),
export=dict(required=True)
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
if not HAS_MUNCH:
module.fail_json(msg='the python munch library is required for this module')
system = get_system(module)
export = get_export(module, system)
if module.params['state'] == 'present':
update_client(module, export)
else:
delete_client(module, export)
if __name__ == '__main__':
main()
| gpl-3.0 |
dhermes/gcloud-python | texttospeech/google/cloud/texttospeech_v1/gapic/transports/text_to_speech_grpc_transport.py | 2 | 4586 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.texttospeech_v1.proto import cloud_tts_pb2_grpc
class TextToSpeechGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.texttospeech.v1 TextToSpeech API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="texttospeech.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"text_to_speech_stub": cloud_tts_pb2_grpc.TextToSpeechStub(channel)
}
@classmethod
def create_channel(
cls, address="texttospeech.googleapis.com:443", credentials=None
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def list_voices(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns a list of ``Voice`` supported for synthesis.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["text_to_speech_stub"].ListVoices
@property
def synthesize_speech(self):
"""Return the gRPC stub for {$apiMethod.name}.
Synthesizes speech synchronously: receive results after all text input
has been processed.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["text_to_speech_stub"].SynthesizeSpeech
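# Illustrative usage sketch (not part of the original module). The request
# type is assumed to live in cloud_tts_pb2, which this file itself does not
# import:
#
#   from google.cloud.texttospeech_v1.proto import cloud_tts_pb2
#
#   transport = TextToSpeechGrpcTransport()  # picks up ambient credentials
#   voices = transport.list_voices(cloud_tts_pb2.ListVoicesRequest())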
| apache-2.0 |
codilime/cloudify-agent | cloudify_agent/installer/config/configuration.py | 1 | 12706 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import os
import platform
from cloudify import ctx
from cloudify import context
from cloudify import constants
from cloudify import utils as cloudify_utils
from cloudify_agent.api import utils as agent_utils
from cloudify_agent.installer import exceptions
from cloudify_agent.installer.config.decorators import group
from cloudify_agent.installer.config.attributes import raise_missing_attribute
from cloudify_agent.installer.config.attributes import raise_missing_attributes
def prepare_connection(cloudify_agent):
connection_attributes(cloudify_agent)
def prepare_agent(cloudify_agent, runner):
cfy_agent_attributes(cloudify_agent)
installation_attributes(cloudify_agent, runner)
@group('connection')
def connection_attributes(cloudify_agent):
if 'local' not in cloudify_agent:
cloudify_agent['local'] = ctx.type == context.DEPLOYMENT
if cloudify_agent['local']:
# if installing an agent locally, we auto-detect which
# os the agent is dedicated for
cloudify_agent['windows'] = os.name == 'nt'
# if installing locally, we install the agent with the same user the
# current agent is running under. we don't care about any other
# connection details
cloudify_agent['user'] = getpass.getuser()
if 'remote_execution' not in cloudify_agent:
cloudify_agent['remote_execution'] = True
else:
if 'remote_execution' not in cloudify_agent:
install_method = cloudify_utils.internal.get_install_method(
ctx.node.properties)
if install_method not in constants.AGENT_INSTALL_METHODS:
raise exceptions.AgentInstallerConfigurationError(
'agent_config.install_method must be one of {0}'
' but found: {1}'.format(constants.AGENT_INSTALL_METHODS,
install_method))
remote_execution = (install_method ==
constants.AGENT_INSTALL_METHOD_REMOTE)
cloudify_agent.update({
'remote_execution': remote_execution,
'install_method': install_method
})
if 'windows' not in cloudify_agent:
if ctx.plugin == 'windows_agent_installer':
# 3.2 Compute node, installing windows
cloudify_agent['windows'] = True
elif ctx.plugin == 'agent_installer':
# 3.2 Compute node, installing linux
cloudify_agent['windows'] = False
else:
# 3.3 Compute node, determine by new property 'os_family'
cloudify_agent['windows'] = ctx.node.properties[
'os_family'].lower() == 'windows'
# support 'ip' attribute as direct node property or runtime
# property (as opposed to nested inside the cloudify_agent dict)
ip = ctx.instance.runtime_properties.get('ip')
if not ip:
ip = ctx.node.properties.get('ip')
if not ip:
ip = cloudify_agent.get('ip')
if not ip and cloudify_agent['remote_execution']:
# a remote installation requires the ip to connect to.
raise_missing_attribute('ip')
if ip:
cloudify_agent['ip'] = ip
# support password as direct node property or runtime
# property (as opposed to nested inside the cloudify_agent dict)
password = ctx.instance.runtime_properties.get('password')
if not password:
password = ctx.node.properties.get('password')
if not password:
password = cloudify_agent.get('password')
if not password and cloudify_agent['windows'] \
and cloudify_agent['remote_execution']:
# a remote windows installation requires a
# password to connect to the machine
raise_missing_attribute('password')
if password:
cloudify_agent['password'] = password
# a remote installation requires the username
# that the agent will run under.
if not cloudify_agent.get('user'):
raise_missing_attribute('user')
# a remote linux installation requires either a password or a key file
# in order to connect to the remote machine.
if not cloudify_agent['windows'] and \
not cloudify_agent.get('password') and \
not cloudify_agent.get('key') and \
cloudify_agent['remote_execution']:
raise_missing_attributes('key', 'password')
@group('cfy-agent')
def cfy_agent_attributes(cloudify_agent):
_cfy_agent_attributes_no_defaults(cloudify_agent)
def _cfy_agent_attributes_no_defaults(cloudify_agent):
if not cloudify_agent.get('process_management'):
cloudify_agent['process_management'] = {}
if not cloudify_agent['process_management'].get('name'):
# user did not specify process management configuration, choose the
# default one according to os type.
if cloudify_agent['windows']:
name = 'nssm'
else:
name = 'init.d'
cloudify_agent['process_management']['name'] = name
if not cloudify_agent.get('name'):
if cloudify_agent['local']:
workflows_worker = cloudify_agent.get('workflows_worker', False)
suffix = '_workflows' if workflows_worker else ''
name = '{0}{1}'.format(ctx.deployment.id, suffix)
else:
name = ctx.instance.id
cloudify_agent['name'] = name
if not cloudify_agent.get('queue'):
# by default, queue of the agent is the same as the name
cloudify_agent['queue'] = cloudify_agent['name']
if not cloudify_agent.get('file_server_host'):
cloudify_agent['file_server_host'] = \
cloudify_utils.get_manager_file_server_host()
if not cloudify_agent.get('file_server_port'):
cloudify_agent['file_server_port'] = \
cloudify_utils.get_manager_file_server_port()
if not cloudify_agent.get('file_server_protocol'):
cloudify_agent['file_server_protocol'] = \
cloudify_utils.get_manager_file_server_protocol()
if not cloudify_agent.get('rest_host'):
cloudify_agent['rest_host'] = \
cloudify_utils.get_manager_rest_service_host()
if not cloudify_agent.get('security_enabled'):
cloudify_agent['security_enabled'] = \
cloudify_utils.is_security_enabled()
if not cloudify_agent.get('rest_protocol'):
cloudify_agent['rest_protocol'] = \
cloudify_utils.get_manager_rest_service_protocol()
if not cloudify_agent.get('rest_port'):
cloudify_agent['rest_port'] = \
cloudify_utils.get_manager_rest_service_port()
if not cloudify_agent.get('rest_username'):
cloudify_agent['rest_username'] = \
cloudify_utils.get_rest_username()
if not cloudify_agent.get('rest_password'):
cloudify_agent['rest_password'] = \
cloudify_utils.get_rest_password()
if not cloudify_agent.get('rest_token'):
cloudify_agent['rest_token'] = \
cloudify_utils.get_rest_token()
if not cloudify_agent.get('rest_cert_content'):
cloudify_agent['rest_cert_content'] = \
cloudify_utils.get_rest_cert_content()
if not cloudify_agent.get('verify_rest_certificate'):
cloudify_agent['verify_rest_certificate'] = \
cloudify_utils.is_verify_rest_certificate()
if not cloudify_agent.get('bypass_maintenance_mode'):
cloudify_agent['bypass_maintenance_mode'] = \
cloudify_utils.get_is_bypass_maintenance()
def directory_attributes(cloudify_agent):
if not cloudify_agent.get('agent_dir'):
name = cloudify_agent['name']
basedir = cloudify_agent['basedir']
if cloudify_agent['windows']:
agent_dir = '{0}\\{1}'.format(basedir, name)
else:
agent_dir = os.path.join(basedir, name)
cloudify_agent['agent_dir'] = agent_dir
if not cloudify_agent.get('workdir'):
agent_dir = cloudify_agent['agent_dir']
if cloudify_agent['windows']:
workdir = '{0}\\{1}'.format(agent_dir, 'work')
else:
workdir = os.path.join(agent_dir, 'work')
cloudify_agent['workdir'] = workdir
if not cloudify_agent.get('envdir'):
agent_dir = cloudify_agent['agent_dir']
if cloudify_agent['windows']:
envdir = '{0}\\{1}'.format(agent_dir, 'env')
else:
envdir = os.path.join(agent_dir, 'env')
cloudify_agent['envdir'] = envdir
if not cloudify_agent.get('agent_rest_cert_path'):
cloudify_agent['agent_rest_cert_path'] = \
cloudify_utils.get_agent_rest_cert_path()
if not cloudify_agent.get('broker_ssl_cert_path'):
cloudify_agent['broker_ssl_cert_path'] = \
cloudify_utils.get_broker_ssl_cert_path()
@group('installation')
def _add_installation_defaults(cloudify_agent):
pass
@group('cfy-agent')
def _add_cfy_agent_defaults(cloudify_agent):
pass
def reinstallation_attributes(cloudify_agent):
_cfy_agent_attributes_no_defaults(cloudify_agent)
_add_cfy_agent_defaults(cloudify_agent)
if cloudify_agent.get('basedir'):
directory_attributes(cloudify_agent)
_add_installation_defaults(cloudify_agent)
@group('installation')
def installation_attributes(cloudify_agent, runner):
if (not cloudify_agent.get('source_url') and
not cloudify_agent.get('package_url')):
if cloudify_agent['windows']:
# no distribution difference in windows installation
cloudify_agent['package_url'] = '{0}/packages/agents' \
'/cloudify-windows-agent.exe'\
.format(cloudify_utils.get_manager_file_server_url())
else:
if not cloudify_agent.get('distro'):
if cloudify_agent['local']:
cloudify_agent['distro'] = platform.dist()[0].lower()
elif cloudify_agent['remote_execution']:
dist = runner.machine_distribution()
cloudify_agent['distro'] = dist[0].lower()
if not cloudify_agent.get('distro_codename'):
if cloudify_agent['local']:
cloudify_agent['distro_codename'] = platform.dist()[
2].lower()
elif cloudify_agent['remote_execution']:
dist = runner.machine_distribution()
cloudify_agent['distro_codename'] = dist[2].lower()
if ('distro' in cloudify_agent and
'distro_codename' in cloudify_agent):
cloudify_agent['package_url'] = '{0}/packages/agents' \
'/{1}-{2}-agent.tar.gz' \
.format(cloudify_utils.get_manager_file_server_url(),
cloudify_agent['distro'],
cloudify_agent['distro_codename'])
if not cloudify_agent.get('basedir'):
if cloudify_agent['local']:
basedir = agent_utils.get_home_dir(cloudify_agent['user'])
else:
if cloudify_agent['windows']:
# can't seem to figure out how to get the home_dir remotely
# on windows. the same approach as fabric won't work because
# the 'pwd' module does not exist in a windows python
# installation.
# TODO - maybe use some environment variables heuristics?
basedir = \
agent_utils.get_windows_home_dir(cloudify_agent['user'])
elif cloudify_agent['remote_execution']:
basedir = runner.home_dir(cloudify_agent['user'])
else:
basedir = '~{0}'.format(cloudify_agent['user'])
cloudify_agent['basedir'] = basedir
directory_attributes(cloudify_agent)
| apache-2.0 |
shakaran/casterly | money/views.py | 1 | 1281 | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from money.forms import UploadCSVstatementForm
from money.models import Movement, MovementCategory
def movements_list(request):
movements = Movement.objects.all()
return render(request, "money/movements_list.html", {
"movements": movements,
})
def upload_estatement(request):
form = UploadCSVstatementForm()
if request.method == "POST":
form = UploadCSVstatementForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('movements_list'))
return render(request, "money/upload_estatement.html", {
"form": form,
})
@csrf_exempt  # TODO: this is very wrong, temporary fix
def inline_category_edit(request):
if request.is_ajax():
category_id = request.POST["category"]
movement_id = request.POST["movement"]
movement = Movement.objects.get(pk=movement_id)
if category_id == 'None':
movement.category = None
else:
movement.category = MovementCategory.objects.get(pk=category_id)
movement.save()
return HttpResponse("Ok", content_type="text/plain")
return HttpResponseForbidden()
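# For reference, inline_category_edit expects an AJAX POST shaped like the
# following (pk values are illustrative):
#   {'movement': '42', 'category': '7'}     # assign category with pk 7
#   {'movement': '42', 'category': 'None'}  # clear the category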
| bsd-3-clause |
scy-phy/minicps | bin/pox-init.py | 2 | 1959 | #!/usr/bin/env python
"""
Script to perform pox init.
Pox dir defaults to ~/pox
minicps dir defaults to ~/minicps
Use POX_COMPONENTS list to add new symlinks.
Use -v for verbose and -vv for colored verbose.
"""
import argparse
import os
POX_COMPONENTS = [
'antiarppoison.py',
'hub.py',
'l2_pairs.py',
'l2_learning.py',
]
SWAT = [
'pox_controller.py',
]
PARSER_DESC = 'perform minicps pox init'
parser = argparse.ArgumentParser(description=PARSER_DESC)
parser.add_argument(
"-v", "--verbose",
help='print more info',
action="count",
default=0)
parser.add_argument(
"-p", "--pox",
help='pox dir (defaults to ~/pox)',
default='~/pox')
parser.add_argument(
"-m", "--minicps",
help='minicps dir (defaults to ~/minicps)',
default='~/minicps')
args = parser.parse_args()
print "pox path is: %s" % args.pox
print "minicps path is: %s" % args.minicps
choice = raw_input('please confirm: [Yn]')
if choice == 'n' or choice == 'no':
print "Abort init.py"
else:
# POX scripts
TARGET = '%s/scripts/pox' % args.minicps
LINK = '%s/ext' % args.pox
for c in POX_COMPONENTS:
command = 'ln -s %s/%s %s/%s' % (TARGET, c, LINK, c)
# print "DEBUG:", command
os.system(command)
if args.verbose >= 2:
vv_command = 'ls --color=auto -l %s/%s' % (LINK, c)
os.system(vv_command)
elif args.verbose >= 1:
v_command = 'ls -l %s/%s' % (LINK, c)
os.system(v_command)
# SWAT
TARGET = '%s/examples/swat' % args.minicps
for c in SWAT:
command = 'ln -s %s/%s %s/%s' % (TARGET, c, LINK, c)
# print "DEBUG:", command
os.system(command)
if args.verbose >= 2:
vv_command = 'ls --color=auto -l %s/%s' % (LINK, c)
os.system(vv_command)
elif args.verbose >= 1:
v_command = 'ls -l %s/%s' % (LINK, c)
os.system(v_command)
| mit |
r-mibu/ceilometer | ceilometer/image/notifications.py | 2 | 3994 | #
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for producing image metering messages from glance notification
events.
"""
from oslo_config import cfg
import oslo_messaging
from ceilometer.agent import plugin_base
from ceilometer import sample
OPTS = [
cfg.StrOpt('glance_control_exchange',
default='glance',
help="Exchange name for Glance notifications."),
]
cfg.CONF.register_opts(OPTS)
class ImageBase(plugin_base.NotificationBase):
"""Base class for image counting."""
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo_messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo_messaging.Target(topic=topic,
exchange=conf.glance_control_exchange)
for topic in conf.notification_topics]
class ImageCRUDBase(ImageBase):
event_types = [
'image.update',
'image.upload',
'image.delete',
]
class ImageCRUD(ImageCRUDBase, plugin_base.NonMetricNotificationBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name=message['event_type'],
type=sample.TYPE_DELTA,
unit='image',
volume=1,
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
class Image(ImageCRUDBase, plugin_base.NonMetricNotificationBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image',
type=sample.TYPE_GAUGE,
unit='image',
volume=1,
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
class ImageSize(ImageCRUDBase):
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.size',
type=sample.TYPE_GAUGE,
unit='B',
volume=message['payload']['size'],
resource_id=message['payload']['id'],
user_id=None,
project_id=message['payload']['owner'],
message=message)
class ImageDownload(ImageBase):
"""Emit image_download sample when an image is downloaded."""
event_types = ['image.send']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.download',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes_sent'],
resource_id=message['payload']['image_id'],
user_id=message['payload']['receiver_user_id'],
project_id=message['payload']['receiver_tenant_id'],
message=message)
class ImageServe(ImageBase):
"""Emit image_serve sample when an image is served out."""
event_types = ['image.send']
def process_notification(self, message):
yield sample.Sample.from_notification(
name='image.serve',
type=sample.TYPE_DELTA,
unit='B',
volume=message['payload']['bytes_sent'],
resource_id=message['payload']['image_id'],
user_id=None,
project_id=message['payload']['owner_id'],
message=message)
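# For reference, the slice of an 'image.send' payload consumed by the two
# classes above looks roughly like this (illustrative values only):
#
#   {'image_id': 'c0ffee', 'bytes_sent': 4096, 'owner_id': 'tenant-1',
#    'receiver_user_id': 'user-1', 'receiver_tenant_id': 'tenant-2'}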
| apache-2.0 |
pgmillon/ansible | lib/ansible/modules/windows/win_mapped_drive.py | 45 | 4162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub, actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_mapped_drive
version_added: '2.4'
short_description: Map network drives for users
description:
- Allows you to modify mapped network drives for individual users.
options:
letter:
description:
- The letter of the network path to map to.
- This letter must not already be in use with Windows.
type: str
required: yes
password:
description:
- The password for C(username) that is used when testing the initial
connection.
- This is never saved with a mapped drive, use the M(win_credential) module
to persist a username and password for a host.
type: str
path:
description:
- The UNC path to map the drive to.
- This is required if C(state=present).
- If C(state=absent) and I(path) is not set, the module will delete the
mapped drive regardless of the target.
- If C(state=absent) and the I(path) is set, the module will throw an error
if path does not match the target of the mapped drive.
type: path
state:
description:
- If C(present) will ensure the mapped drive exists.
- If C(absent) will ensure the mapped drive does not exist.
type: str
choices: [ absent, present ]
default: present
username:
description:
- The username that is used when testing the initial connection.
- This is never saved with a mapped drive; use the M(win_credential) module
to persist a username and password for a host.
- This is required if the mapped drive requires authentication with
custom credentials and become, or CredSSP cannot be used.
- If become or CredSSP is used, any credentials saved with
M(win_credential) will automatically be used instead.
type: str
notes:
- You cannot use this module to access a mapped drive in another Ansible task,
drives mapped with this module are only accessible when logging in
interactively with the user through the console or RDP.
- It is recommended to run this module with become or CredSSP when the remote
path requires authentication.
- When using become or CredSSP, the task will have access to any local
credentials stored in the user's vault.
- If become or CredSSP is not available, the I(username) and I(password)
options can be used for the initial authentication but these are not
persisted.
seealso:
- module: win_credential
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Create a mapped drive under Z
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
- name: Delete any mapped drives under Z
win_mapped_drive:
letter: Z
state: absent
- name: Only delete the mapped drive Z if the paths match (error is thrown otherwise)
win_mapped_drive:
letter: Z
path: \\domain\appdata\accounting
state: absent
- name: Create mapped drive with credentials and save the username and password
block:
- name: Save the network credentials required for the mapped drive
win_credential:
name: server
type: domain_password
username: username@DOMAIN
secret: Password01
state: present
- name: Create a mapped drive that requires authentication
win_mapped_drive:
letter: M
path: \\SERVER\C$
state: present
vars:
# become is required to save and retrieve the credentials in the tasks
ansible_become: yes
ansible_become_method: runas
ansible_become_user: '{{ ansible_user }}'
ansible_become_pass: '{{ ansible_password }}'
- name: Create mapped drive with credentials that do not persist on the next logon
win_mapped_drive:
letter: M
path: \\SERVER\C$
state: present
username: '{{ ansible_user }}'
password: '{{ ansible_password }}'
'''
RETURN = r'''
'''
| gpl-3.0 |
ThirdProject/android_external_chromium_org | third_party/protobuf/python/google/protobuf/internal/decoder.py | 223 | 26136 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask):
"""Return an encoder for a basic varint value (does not include tag).
Decoded values will be bitwise-anded with the given mask before being
returned, e.g. to limit them to 32 bits. The returned decoder does not
take the usual "end" parameter -- the caller is expected to do bounds checking
after the fact (often the caller can defer such checking until later). The
decoder returns a (value, new_pos) pair.
"""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
def _SignedVarintDecoder(mask):
"""Like _VarintDecoder() but decodes signed values."""
local_ord = ord
def DecodeVarint(buffer, pos):
result = 0
shift = 0
while 1:
b = local_ord(buffer[pos])
result |= ((b & 0x7f) << shift)
pos += 1
if not (b & 0x80):
if result > 0x7fffffffffffffff:
result -= (1 << 64)
result |= ~mask
else:
result &= mask
return (result, pos)
shift += 7
if shift >= 64:
raise _DecodeError('Too many bytes when decoding varint.')
return DecodeVarint
_DecodeVarint = _VarintDecoder((1 << 64) - 1)
_DecodeSignedVarint = _SignedVarintDecoder((1 << 64) - 1)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1)
_DecodeSignedVarint32 = _SignedVarintDecoder((1 << 32) - 1)
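# A quick worked example of the varint wire format (illustration only): the
# two bytes '\x96\x01' decode to 150. 0x96 = 0b10010110 has its continuation
# bit set and contributes the low seven bits 0b0010110; 0x01 contributes
# 0b0000001 as the next group, and joining the 7-bit groups little-endian
# gives 0b10010110 = 150:
#
#   _DecodeVarint('\x96\x01', 0) == (150, 2)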
def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos)
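# Example (illustration only): field number 1 with the varint wire type is
# encoded as the single tag byte (1 << 3) | 0 = 0x08, so for a buffer holding
# that tag followed by a varint payload:
#
#   ReadTag('\x08\x96\x01', 0) == ('\x08', 1)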
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode)
def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if ((float_bytes[3] in '\x7F\xFF')
and (float_bytes[2] >= '\x80')):
# If at least one significand bit is set...
if float_bytes[0:3] != '\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3] == '\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
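# For reference, the little-endian single-precision patterns the special case
# above handles (one NaN encoding of many; illustration only):
#   '\x00\x00\x80\x7f' -> +inf
#   '\x00\x00\x80\xff' -> -inf
#   '\x00\x00\xc0\x7f' -> nan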
def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7] in '\x7F\xFF')
and (double_bytes[6] >= '\xF0')
and (double_bytes[0:7] != '\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
# --------------------------------------------------------------------
Int32Decoder = EnumDecoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
SInt32Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
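# For reference, the ZigZag mapping undone by the two sint decoders above:
# encoded 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ... so values of small
# magnitude of either sign stay small on the wire.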
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = unicode
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(local_unicode(buffer[pos:new_pos], 'utf-8'))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = local_unicode(buffer[pos:new_pos], 'utf-8')
return new_pos
return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a message field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
if value._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
return new_pos
return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(extensions_by_number):
"""Returns a decoder for a MessageSet item.
The parameter is the _extensions_by_number map for the message class.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_ReadTag = ReadTag
local_DecodeVarint = _DecodeVarint
local_SkipField = SkipField
def DecodeItem(buffer, pos, end, message, field_dict):
message_set_item_start = pos
type_id = -1
message_start = -1
message_end = -1
# Technically, type_id and message can appear in any order, so we need
# a little loop here.
while 1:
(tag_bytes, pos) = local_ReadTag(buffer, pos)
if tag_bytes == type_id_tag_bytes:
(type_id, pos) = local_DecodeVarint(buffer, pos)
elif tag_bytes == message_tag_bytes:
(size, message_start) = local_DecodeVarint(buffer, pos)
pos = message_end = message_start + size
elif tag_bytes == item_end_tag_bytes:
break
else:
pos = local_SkipField(buffer, pos, end, tag_bytes)
if pos == -1:
raise _DecodeError('Missing group end tag.')
if pos > end:
raise _DecodeError('Truncated message.')
if type_id == -1:
raise _DecodeError('MessageSet item missing type_id.')
if message_start == -1:
raise _DecodeError('MessageSet item missing message.')
extension = extensions_by_number.get(type_id)
if extension is not None:
value = field_dict.get(extension)
if value is None:
value = field_dict.setdefault(
extension, extension.message_type._concrete_class())
if value._InternalParse(buffer, message_start, message_end) != message_end:
# The only reason _InternalParse would return early is if it encountered
# an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
else:
if not message._unknown_fields:
message._unknown_fields = []
message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
buffer[message_set_item_start:pos]))
return pos
return DecodeItem
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
while ord(buffer[pos]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
"""Skip function for unknown wire types. Raises an exception."""
raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
local_ord = ord
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = local_ord(tag_bytes[0]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField
SkipField = _FieldSkipper()
| bsd-3-clause |
tianweizhang/nova | nova/virt/hyperv/snapshotops.py | 18 | 5368 | # Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM snapshot operations.
"""
import os
from oslo.config import cfg
from nova.compute import task_states
from nova.i18n import _
from nova.image import glance
from nova.openstack.common import log as logging
from nova.virt.hyperv import utilsfactory
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SnapshotOps(object):
def __init__(self):
self._pathutils = utilsfactory.get_pathutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
def _save_glance_image(self, context, image_id, image_vhd_path):
(glance_image_service,
image_id) = glance.get_remote_image_service(context, image_id)
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
with self._pathutils.open(image_vhd_path, 'rb') as f:
glance_image_service.update(context, image_id, image_metadata, f)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
LOG.debug("Creating snapshot for instance %s", instance_name)
snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_dir = None
try:
src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
LOG.debug("Getting info for VHD %s", src_vhd_path)
src_base_disk_path = self._vhdutils.get_vhd_parent_path(
src_vhd_path)
export_dir = self._pathutils.get_export_dir(instance_name)
dest_vhd_path = os.path.join(export_dir, os.path.basename(
src_vhd_path))
LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
{'src_vhd_path': src_vhd_path,
'dest_vhd_path': dest_vhd_path})
self._pathutils.copyfile(src_vhd_path, dest_vhd_path)
image_vhd_path = None
if not src_base_disk_path:
image_vhd_path = dest_vhd_path
else:
basename = os.path.basename(src_base_disk_path)
dest_base_disk_path = os.path.join(export_dir, basename)
LOG.debug('Copying base disk %(src_vhd_path)s to '
'%(dest_base_disk_path)s',
{'src_vhd_path': src_vhd_path,
'dest_base_disk_path': dest_base_disk_path})
self._pathutils.copyfile(src_base_disk_path,
dest_base_disk_path)
LOG.debug("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
"VHD %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
dest_base_disk_path)
LOG.debug("Merging base disk %(dest_base_disk_path)s and "
"diff disk %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
image_vhd_path = dest_base_disk_path
LOG.debug("Updating Glance image %(image_id)s with content from "
"merged disk %(image_vhd_path)s",
{'image_id': image_id, 'image_vhd_path': image_vhd_path})
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._save_glance_image(context, image_id, image_vhd_path)
LOG.debug("Snapshot image %(image_id)s updated for VM "
"%(instance_name)s",
{'image_id': image_id, 'instance_name': instance_name})
finally:
try:
LOG.debug("Removing snapshot %s", image_id)
self._vmutils.remove_vm_snapshot(snapshot_path)
except Exception as ex:
LOG.exception(ex)
LOG.warning(_('Failed to remove snapshot for VM %s')
% instance_name)
if export_dir:
LOG.debug('Removing directory: %s', export_dir)
self._pathutils.rmtree(export_dir)
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/idlelib/idle_test/test_parenmatch.py | 43 | 3822 | """Test idlelib.ParenMatch."""
# This must currently be a gui test because ParenMatch methods use
# several text methods not defined on idlelib.idle_test.mock_tk.Text.
import unittest
from test.test_support import requires
from Tkinter import Tk, Text
from idlelib.ParenMatch import ParenMatch
class Mock: # 2.7 does not have unittest.mock
def __init__(self, *args, **kwargs):
self.called = False
def __call__(self, *args, **kwargs):
self.called = True
def reset_mock(self, *args, **kwargs):
self.called = False
def after(self, *args, **kwargs):
pass
class DummyEditwin:
def __init__(self, text):
self.text = text
self.indentwidth = 8
self.tabwidth = 8
self.context_use_ps1 = True
class ParenMatchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
cls.text = Text(cls.root)
cls.editwin = DummyEditwin(cls.text)
cls.editwin.text_frame = Mock()
@classmethod
def tearDownClass(cls):
del cls.text, cls.editwin
cls.root.destroy()
del cls.root
def tearDown(self):
self.text.delete('1.0', 'end')
def test_paren_expression(self):
"""
Test ParenMatch with 'expression' style.
"""
text = self.text
pm = ParenMatch(self.editwin)
pm.set_style('expression')
text.insert('insert', 'def foobar(a, b')
pm.flash_paren_event('event')
self.assertIn('<<parenmatch-check-restore>>', text.event_info())
self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
('1.10', '1.15'))
text.insert('insert', ')')
pm.restore_event()
self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
self.assertEqual(text.tag_prevrange('paren', 'end'), ())
# paren_closed_event can only be tested as below
pm.paren_closed_event('event')
self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
('1.10', '1.16'))
def test_paren_default(self):
"""
Test ParenMatch with 'default' style.
"""
text = self.text
pm = ParenMatch(self.editwin)
pm.set_style('default')
text.insert('insert', 'def foobar(a, b')
pm.flash_paren_event('event')
self.assertIn('<<parenmatch-check-restore>>', text.event_info())
self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
('1.10', '1.11'))
text.insert('insert', ')')
pm.restore_event()
self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
self.assertEqual(text.tag_prevrange('paren', 'end'), ())
def test_paren_corner(self):
"""
Test corner cases in flash_paren_event and paren_closed_event.
These cases force conditional expression and alternate paths.
"""
text = self.text
pm = ParenMatch(self.editwin)
text.insert('insert', '# this is a commen)')
self.assertIsNone(pm.paren_closed_event('event'))
text.insert('insert', '\ndef')
self.assertIsNone(pm.flash_paren_event('event'))
self.assertIsNone(pm.paren_closed_event('event'))
text.insert('insert', ' a, *arg)')
self.assertIsNone(pm.paren_closed_event('event'))
def test_handle_restore_timer(self):
pm = ParenMatch(self.editwin)
pm.restore_event = Mock()
pm.handle_restore_timer(0)
self.assertTrue(pm.restore_event.called)
pm.restore_event.reset_mock()
pm.handle_restore_timer(1)
self.assertFalse(pm.restore_event.called)
if __name__ == '__main__':
unittest.main(verbosity=2)
| gpl-3.0 |
Sparker0i/fosswebsite | noticeBoard/forms.py | 8 | 1131 | from django import forms
from noticeBoard.models import Notice
class NoticeCreateForm(forms.ModelForm):
title = forms.CharField(label='Title', help_text="Enter title",
widget=forms.TextInput(attrs={'placeholder': "Title"}))
message = forms.CharField(label="Message", help_text="Enter message to be posted",
widget=forms.Textarea(attrs={"placeholder": 'Message'}))
startdate = forms.DateTimeField(label="Enter start date and time",
widget=forms.DateTimeInput())
enddate = forms.DateTimeField(label="Enter end date and time",
widget=forms.DateTimeInput())
url = forms.URLField(label="Enter url if any",
widget=forms.URLInput())
def __init__(self, *args, **kwargs):
super(NoticeCreateForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({'class': 'form-control'})
class Meta:
model = Notice
fields = ['title', 'message', 'startdate', 'enddate', 'url']
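# Usage sketch (assumption: a create view along these lines exists elsewhere
# in the app; the names below are illustrative, not part of this module):
#
#     form = NoticeCreateForm(request.POST or None)
#     if form.is_valid():
#         form.save()
#
# Because __init__ adds the Bootstrap 'form-control' CSS class to every
# widget, a template can render {{ form.as_p }} and pick up the styling
# without per-field markup.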
| mit |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/csv.py | 4 | 16343 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe an Excel dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError, e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = self.reader.next()
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def next(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = self.reader.next()
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = self.reader.next()
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
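# Usage sketch for DictReader (illustrative only; 'people.csv' is a
# hypothetical file, not something this module provides):
#
#     with open('people.csv', 'rb') as f:
#         for row in DictReader(f, restval='N/A'):
#             print row['name'], row['age']
#
# Rows shorter than the header are padded with restval; extra trailing
# fields are collected into a list under restkey.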
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: " +
", ".join(wrong_fields))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
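# Matching sketch for DictWriter (again illustrative; the file name is
# hypothetical):
#
#     with open('people.csv', 'wb') as f:
#         w = DictWriter(f, fieldnames=['name', 'age'])
#         w.writeheader()
#         w.writerow({'name': 'Ada', 'age': 36})
#
# Keys missing from a row dict are written as restval; unexpected keys
# raise ValueError unless extrasaction='ignore'.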
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
        dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
        2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
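# End-to-end sketch of the Sniffer (illustrative only; 'sample' stands in
# for the first few KB of some CSV text):
#
#     sniffer = Sniffer()
#     dialect = sniffer.sniff(sample)
#     rows = reader(StringIO(sample), dialect)
#     if sniffer.has_header(sample):
#         rows.next()    # skip the label row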
| apache-2.0 |
brchiu/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/custom_loss_head.py | 87 | 3030 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of `head.Head` with custom loss and link function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
class CustomLossHead(head_lib._RegressionHead): # pylint: disable=protected-access
"""A Head object with custom loss function and link function."""
def __init__(self,
loss_fn,
link_fn,
logit_dimension,
head_name=None,
weight_column_name=None,
metrics_fn=None):
"""`Head` for specifying arbitrary loss function.
Args:
loss_fn: Loss function.
link_fn: Function that converts logits to prediction.
logit_dimension: Number of dimensions for the logits.
head_name: name of the head. Predictions, summary, metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
metrics_fn: a function that takes predictions dict, labels and weights and
returns a dictionary of metrics to be calculated.
"""
def loss_wrapper(labels, logits, weight_tensor):
if weight_tensor is None:
weight_tensor = array_ops.ones(
shape=[array_ops.shape(labels)[0], 1], dtype=dtypes.float32)
weighted_loss, _ = loss_fn(labels, weight_tensor, logits)
average_loss = math_ops.reduce_mean(weighted_loss)
return average_loss, average_loss / math_ops.reduce_mean(weight_tensor)
super(CustomLossHead, self).__init__(
loss_fn=loss_wrapper,
link_fn=link_fn,
head_name=head_name,
weight_column_name=weight_column_name,
enable_centered_bias=False,
label_dimension=logit_dimension)
self._metrics_fn = metrics_fn
def _metrics(self, eval_loss, predictions, labels, weights):
if self._metrics_fn is not None:
return self._metrics_fn(predictions, labels, weights)
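# Instantiation sketch (the loss and link functions below are placeholders,
# not part of this module):
#
#     def my_loss(labels, weights, logits):
#       weighted = weights * math_ops.square(labels - logits)
#       return weighted, weights
#
#     head = CustomLossHead(loss_fn=my_loss,
#                           link_fn=array_ops.identity,
#                           logit_dimension=1)
#
# loss_fn must return a pair whose first element is the per-example
# weighted loss; the wrapper above reduces it to a mean for training and
# ignores the second element.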
| apache-2.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/_vendor/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
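# Sanity check of the ratios quoted above (a sketch, not used at runtime):
#   0.74851 / (1 - 0.74851) = 2.977...  (ideal)
#   512.0 / (5401 - 512)    = 0.1047... (random)
#   0.75 / 2.977            = 0.252     -> "about 25% of Ideal"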
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| mit |
rampantpixels/task_lib | build/ninja/codesign.py | 14 | 8808 | #!/usr/bin/env python
"""Codesign utility"""
import argparse
import subprocess
import os
import time
import shutil
import json
import urlparse  # needed by codesign_android() for proxy URL parsing (Python 2 stdlib module)
parser = argparse.ArgumentParser(description = 'Codesign utility for Ninja builds')
parser.add_argument('file', type=str,
help = 'Bundle/package to sign')
parser.add_argument('--target', type=str,
help = 'Target',
choices = ['macos', 'ios', 'android'],
default = '')
parser.add_argument('--bundle', type=str,
help = 'Bundle identifier (OSX/iOS)',
default = '')
parser.add_argument('--organisation', type=str,
help = 'Organisation identifier (OSX/iOS)',
default = '')
parser.add_argument('--provisioning', type=str,
help = 'Provisioning profile (OSX/iOS)',
default = '')
parser.add_argument('--builddir', type=str,
help = 'Build directory (OSX/iOS)',
default = '')
parser.add_argument('--binname', type=str,
help = 'Binary name (OSX/iOS)',
default = '')
parser.add_argument('--zipfile', type=str,
help = 'Zip file (Android)',
default = '')
parser.add_argument('--tsacert', type=str,
help = 'TSA cert (Android)',
default = '')
parser.add_argument('--tsa', type=str,
help = 'TSA (Android)',
default = '')
parser.add_argument('--keystore', type=str,
help = 'Keystore (Android)',
default = '')
parser.add_argument('--keystorepass', type=str,
help = 'Keystore password (Android)',
default = '')
parser.add_argument('--keyalias', type=str,
help = 'Key alias (Android)',
default = '')
parser.add_argument('--keypass', type=str,
help = 'Key password (Android)',
default = '')
parser.add_argument('--jarsigner', type=str,
help = 'JAR signer (Android)',
default = 'jarsigner')
parser.add_argument('--prefs', type=str,
help = 'Preferences file',
default = '')
parser.add_argument('--config', type=str,
help = 'Build configuration',
default = '')
options = parser.parse_args()
androidprefs = {}
iosprefs = {}
macosprefs = {}
def parse_prefs( prefsfile ):
global androidprefs
global iosprefs
global macosprefs
if not os.path.isfile( prefsfile ):
return
file = open( prefsfile, 'r' )
prefs = json.load( file )
file.close()
if 'android' in prefs:
androidprefs = prefs['android']
if 'ios' in prefs:
iosprefs = prefs['ios']
if 'macos' in prefs:
macosprefs = prefs['macos']
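# A minimal sketch of the prefs JSON consumed above (shape inferred from the
# key lookups in this file; every value shown is a hypothetical placeholder):
#
#   {
#     "ios": { "organisation": "ABCDE12345", "bundleidentifier": "com.example.app",
#              "provisioning": "dev.mobileprovision", "signature": "iPhone Developer" },
#     "macos": { "signature": "Developer ID Application: Example" },
#     "android": { "keystore": "release.keystore", "keystorepass": "secret",
#                  "keyalias": "release", "keypass": "secret" }
#   }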
def codesign_ios():
if not 'organisation' in iosprefs:
iosprefs['organisation'] = options.organisation
if not 'bundleidentifier' in iosprefs:
iosprefs['bundleidentifier'] = options.bundle
if not 'provisioning' in iosprefs:
iosprefs['provisioning'] = options.provisioning
sdkdir = subprocess.check_output( [ 'xcrun', '--sdk', 'iphoneos', '--show-sdk-path' ] ).strip()
entitlements = os.path.join( sdkdir, 'Entitlements.plist' )
plistpath = os.path.join( options.builddir, 'Entitlements.xcent' )
platformpath = subprocess.check_output( [ 'xcrun', '--sdk', 'iphoneos', '--show-sdk-platform-path' ] ).strip()
localpath = platformpath + "/Developer/usr/bin:/Applications/Xcode.app/Contents/Developer/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin"
plutil = "PATH=" + localpath + " " + subprocess.check_output( [ 'xcrun', '--sdk', 'iphoneos', '-f', 'plutil' ] ).strip()
shutil.copyfile( entitlements, plistpath )
os.system( plutil + ' -convert xml1 ' + plistpath )
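# The plist is converted to XML so the Xcode-style $(...) placeholders can be
# substituted with plain text replacement below, instead of editing keys
# through plutil.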
f = open( plistpath, 'r' )
lines = [ line.strip( '\n\r' ) for line in f ]
f.close()
for i in range( 0, len( lines ) ):
if lines[i].find( '$(AppIdentifierPrefix)' ) != -1:
lines[i] = lines[i].replace( '$(AppIdentifierPrefix)', iosprefs['organisation'] + '.' )
if lines[i].find( '$(CFBundleIdentifier)' ) != -1:
lines[i] = lines[i].replace( '$(CFBundleIdentifier)', iosprefs['bundleidentifier'] )
if lines[i].find( '$(binname)' ) != -1:
lines[i] = lines[i].replace( '$(binname)', options.binname )
with open( plistpath, 'w' ) as plist_file:
for line in lines:
if options.config != 'deploy' and line == '</dict>':
plist_file.write( '\t<key>get-task-allow</key>\n' )
plist_file.write( '\t<true/>\n' )
plist_file.write( line + '\n' )
plist_file.close()
if os.path.isfile( os.path.join( options.file, '_CodeSignature', 'CodeResources' ) ):
os.remove( os.path.join( options.file, '_CodeSignature', 'CodeResources' ) )
os.system( '/usr/bin/codesign --force --sign "' + iosprefs['signature'] + '" --entitlements ' + plistpath + ' ' + options.file )
if os.path.isfile( os.path.join( options.file, '_CodeSignature', 'CodeResources' ) ):
os.utime( os.path.join( options.file, '_CodeSignature', 'CodeResources' ), None )
os.utime( os.path.join( options.file, '_CodeSignature' ), None )
os.utime( options.file, None )
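# The resulting signature can be checked by hand with Apple's verifier, e.g.:
#   /usr/bin/codesign --verify --deep --verbose=2 <path-to-bundle>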
def codesign_macos():
if not 'organisation' in macosprefs:
macosprefs['organisation'] = options.organisation
if not 'bundleidentifier' in macosprefs:
macosprefs['bundleidentifier'] = options.bundle
if not 'provisioning' in macosprefs:
macosprefs['provisioning'] = options.provisioning
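# codesign shells out to codesign_allocate; CODESIGN_ALLOCATE is resolved here
# for the macOS SDK and exported on the command line below so the matching
# tool is used.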
codesign_allocate = subprocess.check_output( [ 'xcrun', '--sdk', 'macosx', '-f', 'codesign_allocate' ] ).strip()
sdkdir = subprocess.check_output( [ 'xcrun', '--sdk', 'macosx', '--show-sdk-path' ] ).strip()
entitlements = os.path.join( sdkdir, 'Entitlements.plist' )
if os.path.isfile( os.path.join( options.file, 'Contents', '_CodeSignature', 'CodeResources' ) ):
os.remove( os.path.join( options.file, 'Contents', '_CodeSignature', 'CodeResources' ) )
if 'signature' in macosprefs:
os.system( 'export CODESIGN_ALLOCATE=' + codesign_allocate + '; /usr/bin/codesign --force --sign ' + macosprefs['signature'] + ' ' + options.file )
if os.path.isfile( os.path.join( options.file, 'Contents', '_CodeSignature', 'CodeResources' ) ):
os.utime( os.path.join( options.file, 'Contents', '_CodeSignature', 'CodeResources' ), None )
os.utime( os.path.join( options.file, 'Contents', '_CodeSignature' ), None )
os.utime( os.path.join( options.file, 'Contents' ), None )
os.utime( options.file, None )
def codesign_android():
if not 'tsacert' in androidprefs:
androidprefs['tsacert'] = options.tsacert
if not 'tsa' in androidprefs:
androidprefs['tsa'] = options.tsa
if not 'keystore' in androidprefs:
androidprefs['keystore'] = options.keystore
if not 'keystorepass' in androidprefs:
androidprefs['keystorepass'] = options.keystorepass
if not 'keyalias' in androidprefs:
androidprefs['keyalias'] = options.keyalias
if not 'keypass' in androidprefs:
androidprefs['keypass'] = options.keypass
if not 'jarsigner' in androidprefs:
androidprefs['jarsigner'] = options.jarsigner
timestamp = ''
if androidprefs['tsacert'] != '':
timestamp = '-tsacert ' + androidprefs['tsacert']
elif androidprefs['tsa'] != '':
timestamp = '-tsa ' + androidprefs['tsa']
proxy = ''
if 'proxy' in androidprefs and androidprefs['proxy'] != '' and androidprefs['proxy'] != 'None':
proxy = androidprefs['proxy']
if proxy != '' and proxy != 'None':
defstr = "-J-Dhttp.proxy"
url = urlparse.urlparse(proxy)
if url.scheme == 'https':
defstr = "-J-Dhttps.proxy"
host = url.netloc
port = ''
username = ''
password = ''
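# The parsing below expects credentials in the form user:password@host[:port];
# a bare user@host (no ':') would make the first split raise ValueError.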
if '@' in host:
username, host = host.split(':', 1)
password, host = host.split('@', 1)
if ':' in host:
host, port = host.split(':', 1)
proxy = defstr + "Host=" + host
if port != '':
proxy += " " + defstr + "Port=" + port
if username != '':
proxy += " " + defstr + "User=" + username
if password != '':
proxy += " " + defstr + "Password=" + password
signcmd = androidprefs['jarsigner'] + ' ' + timestamp + ' -sigalg SHA1withRSA -digestalg SHA1 -keystore ' + androidprefs['keystore'] + ' -storepass ' + androidprefs['keystorepass'] + ' -keypass ' + androidprefs['keypass'] + ' -signedjar ' + options.file + ' ' + options.zipfile + ' ' + androidprefs['keyalias'] + ' ' + proxy
os.system(signcmd)
parse_prefs( options.prefs )
if options.target == 'ios':
codesign_ios()
elif options.target == 'macos':
codesign_macos()
elif options.target == 'android':
codesign_android()
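# Example invocation (all paths and identifiers here are hypothetical):
#   python codesign.py build/macos/app.app --target macos \
#       --builddir build/macos --binname app --prefs codesign.json --config release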
| unlicense |
liuqr/edx-xiaodun | lms/envs/acceptance.py | 6 | 5707 | """
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .test import *
from .sauce import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT)
# Output Django logs to a file
import logging
logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR)
import os
from random import choice
import string
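# seed() keys the throwaway Mongo database/collection names off the parent
# process id, so parallel acceptance runs get isolated stores (see
# DOC_STORE_CONFIG and CONTENTSTORE below).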
def seed():
return os.getppid()
# Use the mongo store for acceptance tests
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'acceptance_xmodule',
'collection': 'acceptance_modulestore_%s' % seed(),
}
modulestore_options = {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'reference_type': 'Location',
'stores': {
'default': {
'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
}
}
}
}
}
MODULESTORE['direct'] = MODULESTORE['default']
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'acceptance_xcontent_%s' % seed(),
}
}
# Set this up so that rake lms[acceptance] and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_edx.db",
'TEST_NAME': TEST_ROOT / "db" / "test_edx.db",
}
}
TRACKING_BACKENDS.update({
'mongo': {
'ENGINE': 'track.backends.mongodb.MongoBackend'
}
})
# Enable asset pipeline
# Our fork of django-pipeline uses `PIPELINE` instead of `PIPELINE_ENABLED`
# PipelineFinder is explained here: http://django-pipeline.readthedocs.org/en/1.1.24/storages.html
PIPELINE = True
STATICFILES_FINDERS += ('pipeline.finders.PipelineFinder', )
BULK_EMAIL_DEFAULT_FROM_EMAIL = "test@test.org"
# Forums are disabled in test.py to speed up unit tests, but we do not have
# per-test control for acceptance tests
FEATURES['ENABLE_DISCUSSION_SERVICE'] = True
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable fake payment processing page
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Enable email on the instructor dash
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False
# Don't actually send any requests to Software Secure for student identity
# verification.
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
# Configure the payment processor to use the fake processing page
# Since both the fake payment page and the shoppingcart app are using
# the same settings, we can generate this randomly and guarantee
# that they are using the same secret.
RANDOM_SHARED_SECRET = ''.join(
choice(string.letters + string.digits + string.punctuation)
for x in range(250)
)
CC_PROCESSOR['CyberSource']['SHARED_SECRET'] = RANDOM_SHARED_SECRET
CC_PROCESSOR['CyberSource']['MERCHANT_ID'] = "edx"
CC_PROCESSOR['CyberSource']['SERIAL_NUMBER'] = "0123456789012345678901"
CC_PROCESSOR['CyberSource']['PURCHASE_ENDPOINT'] = "/shoppingcart/payment_fake"
# HACK
# Setting this flag to False causes imports in the Lettuce Python files to
# fail to load correctly. We do not yet understand why this occurs; setting
# it to True is a stopgap measure.
USE_I18N = True
FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = True
FEEDBACK_SUBMISSION_EMAIL = 'dummy@example.com'
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('courseware', 'instructor',)
# Lettuce appears to have a bug that causes it to search
# `instructor_task` when we specify the `instructor` app.
# This causes some pretty cryptic errors as lettuce tries
# to parse files in `instructor_task` as features.
# As a quick workaround, explicitly exclude the `instructor_task` app.
LETTUCE_AVOID_APPS = ('instructor_task',)
LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome')
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local')
SELENIUM_GRID = {
'URL': 'http://127.0.0.1:4444/wd/hub',
'BROWSER': LETTUCE_BROWSER,
}
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
# Because an override for where to run will affect which ports to use,
# set these up after the local overrides.
# Configure XQueue interface to use our stub XQueue server
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT),
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE_TEST_URL = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
| agpl-3.0 |
pkleimert/hrpt | apps/survey/migrations/0006_auto__add_field_surveyuser_oneuser.py | 4 | 12365 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SurveyUser.oneuser'
db.add_column('survey_surveyuser', 'oneuser', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'SurveyUser.oneuser'
db.delete_column('survey_surveyuser', 'oneuser_id')
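# A South migration like this is typically applied with the standard
# management command (assumed from common South usage):
#   python manage.py migrate survey 0006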
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.extraresponse': {
'Meta': {'object_name': 'ExtraResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'c4b8d923-bf27-4a67-90d8-39550680036a'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'oneuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'temporary_name'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
}
}
complete_apps = ['survey']
| agpl-3.0 |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/test/test_doctest2.py | 1 | 2389 | # -*- coding: utf-8 -*-
u"""A module to test whether doctest recognizes some 2.2 features,
like static and class methods.
>>> print 'yup' # 1
yup
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
from test import test_support
class C(object):
u"""Class C.
>>> print C() # 2
42
We include some (random) encoded (utf-8) text in the text surrounding
the example. It should be ignored:
ЉЊЈЁЂ
"""
def __init__(self):
"""C.__init__.
>>> print C() # 3
42
"""
def __str__(self):
"""
>>> print C() # 4
42
"""
return "42"
class D(object):
"""A nested D class.
>>> print "In D!" # 5
In D!
"""
def nested(self):
"""
>>> print 3 # 6
3
"""
def getx(self):
"""
>>> c = C() # 7
>>> c.x = 12 # 8
>>> print c.x # 9
-12
"""
return -self._x
def setx(self, value):
"""
>>> c = C() # 10
>>> c.x = 12 # 11
>>> print c.x # 12
-12
"""
self._x = value
x = property(getx, setx, doc="""\
>>> c = C() # 13
>>> c.x = 12 # 14
>>> print c.x # 15
-12
""")
@staticmethod
def statm():
"""
A static method.
>>> print C.statm() # 16
666
>>> print C().statm() # 17
666
"""
return 666
@classmethod
def clsm(cls, val):
"""
A class method.
>>> print C.clsm(22) # 18
22
>>> print C().clsm(23) # 19
23
"""
return val
def test_main():
from test import test_doctest2
EXPECTED = 19
f, t = test_support.run_doctest(test_doctest2)
if t != EXPECTED:
raise test_support.TestFailed("expected %d tests to run, not %d" %
(EXPECTED, t))
# Pollute the namespace with a bunch of imported functions and classes,
# to make sure they don't get tested.
from doctest import *
if __name__ == '__main__':
test_main()
| mit |
mancoast/CPythonPyc_test | fail/332_test_codecmaps_kr.py | 28 | 1393 | #!/usr/bin/env python3
#
# test_codecmaps_kr.py
# Codec mapping tests for ROK encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestCP949Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp949'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT' \
'/WINDOWS/CP949.TXT'
class TestEUCKRMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_kr'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-KR.TXT'
    # A4D4 HANGUL FILLER indicates the beginning of an 8-byte make-up sequence.
pass_enctest = [(b'\xa4\xd4', '\u3164')]
pass_dectest = [(b'\xa4\xd4', '\u3164')]
class TestJOHABMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'johab'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/' \
'KSC/JOHAB.TXT'
    # The KS X 1001 standard assigned 0x5c to WON SIGN, but in the early 90s,
    # the only era in which johab was widely used, most software
    # implemented it as REVERSE SOLIDUS.
    # So, we ignore the standard here.
pass_enctest = [(b'\\', '\u20a9')]
pass_dectest = [(b'\\', '\u20a9')]
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
EPDCenter/android_kernel_archos_97_titan | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
baronleonardo/mailchecker | account.py | 1 | 3594 | import sys
from sys_keyring import MailChecker_Keyring
from settings import MailChecker_AccountSettings
class MailChecker_AccountsManagerData():
nickname = ""
email_address = ""
password = ""
imap = ""
mailbox = "",
timeout = -1
def __init__(self,
nickname: str,
email_address: str,
password: str,
imap: str,
mailbox: str = 'index',
timeout: int = 10):
self.nickname = nickname
self.email_address = email_address
self.password = password
self.imap = imap
self.mailbox = mailbox
self.timeout = timeout
class MailChecker_AccountsManager():
def __init__(self, app_name: str):
self.__account_settings = MailChecker_AccountSettings()
self.__keyring = MailChecker_Keyring(app_name)
def add_account(self,
nickname: str,
email_address: str,
password: str,
imap: str,
mailbox: str = 'index',
timeout: int = 10):
if self.__account_settings.exits(nickname):
print("An account already exists with the same `nickname` %s" % nickname,
file=sys.stderr)
return False
# create first in `accounts.ini`
self.__account_settings.create(nickname, email_address, imap, mailbox, timeout)
# create the password in the keyring
        self.__keyring.add_account(nickname, password)
        return True
def delete_account(self, nickname: str) -> bool:
if self.__account_settings.remove(nickname):
if self.__keyring.delete_account(nickname):
return True
return False
def update_account(self,
nickname: str,
new_email: str = "",
new_password: str = "",
new_imap: str = "",
new_mailbox: str = "",
new_timeout: int = -1):
        if self.__account_settings.update(nickname, new_email, new_imap,
                                          new_mailbox, new_timeout):
            if new_password != "":
                return self.__keyring.update_account(nickname, new_password)
            return True
        return False
def get_account_data(self, nickname) -> dict:
account = self.__account_settings.read_email_data(nickname)
# convert timeout into integer
account['timeout'] = int(account['timeout'])
account['password'] = self.__keyring.get_account_password(nickname)
return account
def get_all_accounts_data(self) -> list:
data = []
for nickname in self.__account_settings.list_nicknames():
data.append(self.get_account_data(nickname))
return data
def get_all_accounts_nicknames(self) -> list:
return self.__account_settings.list_nicknames()
def check_account_existance(self, nickname) -> bool:
        return self.__account_settings.exits(nickname)
if __name__ == "__main__":
manager = MailChecker_AccountsManager("mailchecker")
manager.add_account("test", "mohamedayman.fcis@zoho.com", "0121314", "imap.zoho.com")
manager.update_account("test", new_password="meandme")
print(manager.get_account_data("test"))
print(manager.check_account_existance("test"))
print(manager.get_all_accounts_data())
# manager.delete_account("test")
| gpl-3.0 |
zstyblik/infernal-twin | build/pillow/Scripts/painter.py | 11 | 2037 | #!/usr/bin/env python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image everytime we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| gpl-3.0 |
helldorado/ansible | test/units/vars/test_variable_manager.py | 25 | 11823 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.mock import MagicMock, patch
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import iteritems
from ansible.playbook.play import Play
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.vars.manager import VariableManager
class TestVariableManager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_manager(self):
fake_loader = DictDataLoader({})
mock_inventory = MagicMock()
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
variables = v.get_vars(use_cache=False)
# Check var manager expected values, never check: ['omit', 'vars']
# FIXME: add the following ['ansible_version', 'ansible_playbook_python', 'groups']
for varname, value in (('playbook_dir', os.path.abspath('.')), ):
self.assertEqual(variables[varname], value)
def test_variable_manager_extra_vars(self):
fake_loader = DictDataLoader({})
extra_vars = dict(a=1, b=2, c=3)
mock_inventory = MagicMock()
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
# override internal extra_vars loading
v._extra_vars = extra_vars
myvars = v.get_vars(use_cache=False)
for (key, val) in iteritems(extra_vars):
self.assertEqual(myvars.get(key), val)
def test_variable_manager_options_vars(self):
fake_loader = DictDataLoader({})
options_vars = dict(a=1, b=2, c=3)
mock_inventory = MagicMock()
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
# override internal options_vars loading
v._extra_vars = options_vars
myvars = v.get_vars(use_cache=False)
for (key, val) in iteritems(options_vars):
self.assertEqual(myvars.get(key), val)
def test_variable_manager_play_vars(self):
fake_loader = DictDataLoader({})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
mock_inventory = MagicMock()
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
__file__: """
foo: bar
"""
})
mock_play = MagicMock()
mock_play.get_vars.return_value = dict()
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = [__file__]
mock_inventory = MagicMock()
v = VariableManager(inventory=mock_inventory, loader=fake_loader)
self.assertEqual(v.get_vars(play=mock_play, use_cache=False).get("foo"), "bar")
def test_variable_manager_task_vars(self):
# FIXME: BCS make this work
return
# pylint: disable=unreachable
fake_loader = DictDataLoader({})
mock_task = MagicMock()
mock_task._role = None
mock_task.loop = None
mock_task.get_vars.return_value = dict(foo="bar")
mock_task.get_include_params.return_value = dict()
mock_all = MagicMock()
mock_all.get_vars.return_value = {}
mock_all.get_file_vars.return_value = {}
mock_host = MagicMock()
mock_host.get.name.return_value = 'test01'
mock_host.get_vars.return_value = {}
mock_host.get_host_vars.return_value = {}
mock_inventory = MagicMock()
mock_inventory.hosts.get.return_value = mock_host
mock_inventory.hosts.get.name.return_value = 'test01'
mock_inventory.get_host.return_value = mock_host
mock_inventory.groups.__getitem__.return_value = mock_all
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
self.assertEqual(v.get_vars(task=mock_task, use_cache=False).get("foo"), "bar")
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_precedence(self):
# FIXME: this needs to be redone as dataloader is not the automatic source of data anymore
return
# pylint: disable=unreachable
'''
Tests complex variations and combinations of get_vars() with different
objects to modify the context under which variables are merged.
'''
        # FIXME: BCS make this work
# return True
mock_inventory = MagicMock()
inventory1_filedata = """
[group2:children]
group1
[group1]
host1 host_var=host_var_from_inventory_host1
[group1:vars]
group_var = group_var_from_inventory_group1
[group2:vars]
group_var = group_var_from_inventory_group2
"""
fake_loader = DictDataLoader({
# inventory1
'/etc/ansible/inventory1': inventory1_filedata,
# role defaults_only1
'/etc/ansible/roles/defaults_only1/defaults/main.yml': """
default_var: "default_var_from_defaults_only1"
host_var: "host_var_from_defaults_only1"
group_var: "group_var_from_defaults_only1"
group_var_all: "group_var_all_from_defaults_only1"
extra_var: "extra_var_from_defaults_only1"
""",
'/etc/ansible/roles/defaults_only1/tasks/main.yml': """
- debug: msg="here i am"
""",
# role defaults_only2
'/etc/ansible/roles/defaults_only2/defaults/main.yml': """
default_var: "default_var_from_defaults_only2"
host_var: "host_var_from_defaults_only2"
group_var: "group_var_from_defaults_only2"
group_var_all: "group_var_all_from_defaults_only2"
extra_var: "extra_var_from_defaults_only2"
""",
})
inv1 = InventoryManager(loader=fake_loader, sources=['/etc/ansible/inventory1'])
v = VariableManager(inventory=mock_inventory, loader=fake_loader)
play1 = Play.load(dict(
hosts=['all'],
roles=['defaults_only1', 'defaults_only2'],
), loader=fake_loader, variable_manager=v)
# first we assert that the defaults as viewed as a whole are the merged results
# of the defaults from each role, with the last role defined "winning" when
# there is a variable naming conflict
res = v.get_vars(play=play1)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only2')
# next, we assert that when vars are viewed from the context of a task within a
# role, that task will see its own role defaults before any other role's
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(play=play1, task=task)
self.assertEqual(res['default_var'], 'default_var_from_defaults_only1')
# next we assert the precedence of inventory variables
v.set_inventory(inv1)
h1 = inv1.get_host('host1')
res = v.get_vars(play=play1, host=h1)
self.assertEqual(res['group_var'], 'group_var_from_inventory_group1')
self.assertEqual(res['host_var'], 'host_var_from_inventory_host1')
# next we test with group_vars/ files loaded
fake_loader.push("/etc/ansible/group_vars/all", """
group_var_all: group_var_all_from_group_vars_all
""")
fake_loader.push("/etc/ansible/group_vars/group1", """
group_var: group_var_from_group_vars_group1
""")
fake_loader.push("/etc/ansible/group_vars/group3", """
# this is a dummy, which should not be used anywhere
group_var: group_var_from_group_vars_group3
""")
fake_loader.push("/etc/ansible/host_vars/host1", """
host_var: host_var_from_host_vars_host1
""")
fake_loader.push("group_vars/group1", """
playbook_group_var: playbook_group_var
""")
fake_loader.push("host_vars/host1", """
playbook_host_var: playbook_host_var
""")
res = v.get_vars(play=play1, host=h1)
# self.assertEqual(res['group_var'], 'group_var_from_group_vars_group1')
# self.assertEqual(res['group_var_all'], 'group_var_all_from_group_vars_all')
# self.assertEqual(res['playbook_group_var'], 'playbook_group_var')
# self.assertEqual(res['host_var'], 'host_var_from_host_vars_host1')
# self.assertEqual(res['playbook_host_var'], 'playbook_host_var')
# add in the fact cache
v._fact_cache['host1'] = dict(fact_cache_var="fact_cache_var_from_fact_cache")
res = v.get_vars(play=play1, host=h1)
self.assertEqual(res['fact_cache_var'], 'fact_cache_var_from_fact_cache')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_variable_manager_role_vars_dependencies(self):
'''
Tests vars from role dependencies with duplicate dependencies.
'''
mock_inventory = MagicMock()
fake_loader = DictDataLoader({
# role common-role
'/etc/ansible/roles/common-role/tasks/main.yml': """
- debug: msg="{{role_var}}"
""",
# We do not need allow_duplicates: yes for this role
# because eliminating duplicates is done by the execution
# strategy, which we do not test here.
# role role1
'/etc/ansible/roles/role1/vars/main.yml': """
role_var: "role_var_from_role1"
""",
'/etc/ansible/roles/role1/meta/main.yml': """
dependencies:
- { role: common-role }
""",
# role role2
'/etc/ansible/roles/role2/vars/main.yml': """
role_var: "role_var_from_role2"
""",
'/etc/ansible/roles/role2/meta/main.yml': """
dependencies:
- { role: common-role }
""",
})
v = VariableManager(loader=fake_loader, inventory=mock_inventory)
play1 = Play.load(dict(
hosts=['all'],
roles=['role1', 'role2'],
), loader=fake_loader, variable_manager=v)
# The task defined by common-role exists twice because role1
# and role2 depend on common-role. Check that the tasks see
# different values of role_var.
blocks = play1.compile()
task = blocks[1].block[0]
res = v.get_vars(play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role1')
task = blocks[2].block[0]
res = v.get_vars(play=play1, task=task)
self.assertEqual(res['role_var'], 'role_var_from_role2')
| gpl-3.0 |
01org/yoko-tool | yokolibs/PowerMeter.py | 1 | 5430 | #!/usr/bin/env python
#
# Copyright (C) 2016-2020 Intel Corporation
# SPDX-License-Identifier: GPL-2.0-only
#
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 tw=100 et ai si
#
# Authors: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
# Helia Correia <helia.correia@linux.intel.com>
"""
This module provides API for controlling Yokogawa power meters.
"""
# pylint: disable=too-many-instance-attributes
from __future__ import absolute_import, division, print_function
import logging
from collections import OrderedDict
from yokolibs import Transport, _wt310, _wt210
from yokolibs._config import CONFIG_OPTIONS as _KWARGS
# pylint: disable=unused-import
from yokolibs._exceptions import Error, ErrorBadArgument, ErrorBadResponse
from yokolibs._yokobase import COMMANDS
# pylint: enable=unused-import
# Class objects for the supported power meters.
_PMTYPE_CLASSES = OrderedDict([(_wt310.WT310.pmtypes, _wt310.WT310),
(_wt210.WT210.pmtypes, _wt210.WT210)])
_LOG = logging.getLogger("PowerMeter")
# This makes sure all classes are the new-style classes by default.
__metaclass__ = type # pylint: disable=invalid-name
class PowerMeter:
"""This class extends the capabilities of 'WT310' class."""
def get_argument_help(self, cmd):
"""Return a user-friendly help message for the power meter command 'cmd'."""
if cmd not in self.commands:
raise Error("command '%s' does not support arguments" % cmd)
if self.commands[cmd]["value-descr"]:
return self.commands[cmd]["value-descr"]
if self.commands[cmd]["choices"]:
return ", ".join(self.commands[cmd]["choices"])
raise Error("no help text for '%s'" % cmd)
def close(self):
"""Close the communication interface with the power meter."""
if self._pmeter:
self._pmeter.close()
self._pmeter = None
def command(self, cmd, arg=None):
"""
Execute the power meter command 'cmd' with argument 'arg'. Return the command response or
'None' if the command has no response. 'cmd' should be a string, 'arg' should be of the type
the specific command expects. In most of the cases 'arg' is a string or 'None', but it may
also be a list in case of the 'configure-data-items' command.
"""
try:
return self._pmeter.command(cmd, arg)
except ErrorBadArgument:
raise Error("bad argument '%s' for command '%s', use:\n%s"
% (arg, cmd, self.get_argument_help(cmd)))
def _probe_error(self, errors):
"""TODO"""
import textwrap
wrapper = textwrap.TextWrapper(width=79)
msg = "unknown type of the device '%s'. Here is the log of all the attempts to recognize " \
"the device type." % self._transport.devnode
lines = wrapper.wrap(msg)
wrapper.initial_indent = " * "
        wrapper.subsequent_indent = " "
for pmtype, cls, err in errors:
msg = "%s (%s): %s" % (pmtype, cls, err)
lines += wrapper.wrap(msg)
raise Error("\n".join(lines))
def __init__(self, transport=None, **kwargs):
"""
The class constructor. The optional 'transport' argument specifies the transport object to
use. If it is not provided, the rest of the arguments are used to create the transport. The
allowed keys in 'kwargs' are the same as the configuration file options (e.g., 'devnode',
etc).
"""
# Validate kwargs.
for kwarg in kwargs:
if kwarg not in _KWARGS:
raise Error("unknown keyword argument '%s'" % kwargs)
self._transport = transport
if not transport:
self._transport = Transport.Transport(**kwargs)
pmtype = kwargs.get("pmtype", None)
if pmtype:
pmtype = pmtype.lower()
for pmtypes, cls in _PMTYPE_CLASSES.items():
if pmtype in pmtypes:
self._pmeter = cls(self._transport)
break
else:
msg = []
for pmtypes, pmclass in _PMTYPE_CLASSES.items():
msg.append("* %s - %s" % (", ".join(pmtypes), pmclass.name))
raise Error("unsupported power meter type '%s', supported power meter types "
"are:\n%s" % (pmtype, "\n".join(msg)))
else:
errors = []
self._pmeter = None
for pmtypes, cls in _PMTYPE_CLASSES.items():
pmtype = "/".join(pmtypes)
try:
_LOG.debug("probing '%s'", pmtype)
self._pmeter = cls(self._transport)
break
except Error as err:
errors.append((pmtype, cls, err))
if not self._pmeter:
self._probe_error(errors)
def __getattr__(self, name):
"""
If an attribute is not found in PowerMeter, then it is searched for in 'self._pmeter', the
object representing a specific power meter model (eg., WT310).
"""
return getattr(self._pmeter, name)
def __enter__(self):
"""Enter the runtime context."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the runtime context."""
self.close()
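

# A minimal usage sketch, not part of the module: 'devnode' matches the
# configuration-file option mentioned in the constructor docstring, but the
# device path and the command name below are placeholders; consult COMMANDS
# for the real command set of your power meter.
if __name__ == "__main__":
    with PowerMeter(devnode="/dev/usbtmc0") as pmeter:  # hypothetical node
        # "get-id" is an assumed command name, used here for illustration only.
        print(pmeter.command("get-id"))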
| gpl-2.0 |
Suwmlee/XX-Net | Python3/lib/hmac.py | 142 | 5063 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object. *OR*
A hash name suitable for hashlib.new().
Defaults to hashlib.md5.
Implicit default to hashlib.md5 is deprecated and will be
removed in Python 3.6.
Note: key and msg must be a bytes or bytearray objects.
"""
if not isinstance(key, (bytes, bytearray)):
raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
if digestmod is None:
_warnings.warn("HMAC() without an explicit digestmod argument "
"is deprecated.", PendingDeprecationWarning, 2)
digestmod = _hashlib.md5
if callable(digestmod):
self.digest_cons = digestmod
elif isinstance(digestmod, str):
self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
else:
self.digest_cons = lambda d=b'': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
# self.blocksize is the default blocksize. self.block_size is
# effective block size as well as the public API attribute.
self.block_size = blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + bytes(blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
@property
def name(self):
return "hmac-" + self.inner.name
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
# Call __new__ directly to avoid the expensive __init__.
other = self.__class__.__new__(self.__class__)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
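

# A small self-contained sketch (not part of the stdlib module): compute a
# SHA-256 tag for a message and verify it with the constant-time
# compare_digest() imported above, which avoids the timing side channel a
# plain == comparison would leak.
if __name__ == '__main__':
    _key, _msg = b'secret-key', b'payload'
    _tag = new(_key, _msg, _hashlib.sha256).hexdigest()
    assert compare_digest(_tag, new(_key, _msg, _hashlib.sha256).hexdigest())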
| bsd-2-clause |
asgard-lab/neutron | neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py | 11 | 4385 | # Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.services.qos import qos_consts
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
self.rule = self._create_bw_limit_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.port = self._create_fake_port(self.qos_policy.id)
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'tenant_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
for policy_rule in policy_obj.rules:
policy_rule.qos_policy_id = policy_obj.id
policy_rule.obj_reset_changes()
return policy_obj
def _create_fake_port(self, qos_policy_id):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC,
qos_consts.QOS_POLICY_ID: qos_policy_id,
'device_owner': uuidutils.generate_uuid()}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules_on_assigned_vf(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test_delete_rules_on_released_vf(self):
del self.port['device_owner']
self.qos_driver.delete(self.port, self.qos_policy)
self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
| apache-2.0 |
prampey/servo | tests/wpt/web-platform-tests/tools/py/testing/root/test_py_imports.py | 163 | 1983 | import py
import types
import sys
def checksubpackage(name):
obj = getattr(py, name)
if hasattr(obj, '__map__'): # isinstance(obj, Module):
keys = dir(obj)
assert len(keys) > 0
print (obj.__map__)
for name in list(obj.__map__):
assert hasattr(obj, name), (obj, name)
def test_dir():
for name in dir(py):
if not name.startswith('_'):
yield checksubpackage, name
def test_virtual_module_identity():
from py import path as path1
from py import path as path2
assert path1 is path2
from py.path import local as local1
from py.path import local as local2
assert local1 is local2
def test_importall():
base = py._pydir
nodirs = [
]
if sys.version_info >= (3,0):
nodirs.append(base.join('_code', '_assertionold.py'))
else:
nodirs.append(base.join('_code', '_assertionnew.py'))
def recurse(p):
return p.check(dotfile=0) and p.basename != "attic"
for p in base.visit('*.py', recurse):
if p.basename == '__init__.py':
continue
relpath = p.new(ext='').relto(base)
if base.sep in relpath: # not py/*.py itself
for x in nodirs:
if p == x or p.relto(x):
break
else:
relpath = relpath.replace(base.sep, '.')
modpath = 'py.%s' % relpath
try:
check_import(modpath)
except py.test.skip.Exception:
pass
def check_import(modpath):
py.builtin.print_("checking import", modpath)
assert __import__(modpath)
def test_all_resolves():
seen = py.builtin.set([py])
lastlength = None
while len(seen) != lastlength:
lastlength = len(seen)
for item in py.builtin.frozenset(seen):
for value in item.__dict__.values():
if isinstance(value, type(py.test)):
seen.add(value)
| mpl-2.0 |
Russell-IO/ansible | lib/ansible/plugins/lookup/pipe.py | 150 | 2325 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: pipe
author: Daniel Hokka Zakrisson <daniel@hozac.com>
version_added: "0.9"
short_description: read output from a command
description:
- Run a command and return the output
options:
_terms:
description: command(s) to run
required: True
notes:
- Like all lookups this runs on the Ansible controller and is unaffected by other keywords, such as become,
so if you need to different permissions you must change the command or run Ansible as another user.
- Alternatively you can use a shell/command task that runs against localhost and registers the result.
"""
EXAMPLES = """
- name: raw result of running date command"
debug: msg="{{ lookup('pipe','date') }}"
- name: Always use quote filter to make sure your variables are safe to use with shell
debug: msg="{{ lookup('pipe','getent ' + myuser|quote ) }}"
"""
RETURN = """
_string:
description:
- stdout from command
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
'''
http://docs.python.org/2/library/subprocess.html#popen-constructor
The shell argument (which defaults to False) specifies whether to use the
shell as the program to execute. If shell is True, it is recommended to pass
args as a string rather than as a sequence
https://github.com/ansible/ansible/issues/6550
'''
term = str(term)
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
| gpl-3.0 |
leppa/home-assistant | homeassistant/components/netgear_lte/__init__.py | 3 | 11410 | """Support for Netgear LTE modems."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import attr
import eternalegypt
import voluptuous as vol
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from . import sensor_types
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISPATCHER_NETGEAR_LTE = "netgear_lte_update"
DOMAIN = "netgear_lte"
DATA_KEY = "netgear_lte"
EVENT_SMS = "netgear_lte_sms"
SERVICE_DELETE_SMS = "delete_sms"
SERVICE_SET_OPTION = "set_option"
SERVICE_CONNECT_LTE = "connect_lte"
SERVICE_DISCONNECT_LTE = "disconnect_lte"
ATTR_HOST = "host"
ATTR_SMS_ID = "sms_id"
ATTR_FROM = "from"
ATTR_MESSAGE = "message"
ATTR_FAILOVER = "failover"
ATTR_AUTOCONNECT = "autoconnect"
FAILOVER_MODES = ["auto", "wire", "mobile"]
AUTOCONNECT_MODES = ["never", "home", "always"]
NOTIFY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DOMAIN): cv.string,
vol.Optional(CONF_RECIPIENT, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_SENSORS)])
}
)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=sensor_types.DEFAULT_BINARY_SENSORS
): vol.All(cv.ensure_list, [vol.In(sensor_types.ALL_BINARY_SENSORS)])
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(NOTIFY_DOMAIN, default={}): vol.All(
cv.ensure_list, [NOTIFY_SCHEMA]
),
vol.Optional(SENSOR_DOMAIN, default={}): SENSOR_SCHEMA,
vol.Optional(
BINARY_SENSOR_DOMAIN, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
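
# An illustrative configuration sketch derived from CONFIG_SCHEMA above; the
# host, password and recipient values are placeholders, and the optional
# sensor/binary_sensor blocks (see sensor_types) are omitted for brevity:
#
#   netgear_lte:
#     - host: 192.168.5.1
#       password: !secret lte_password
#       notify:
#         - name: sms
#           recipient: "+15105550123"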
DELETE_SMS_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_HOST): cv.string,
vol.Required(ATTR_SMS_ID): vol.All(cv.ensure_list, [cv.positive_int]),
}
)
SET_OPTION_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(ATTR_FAILOVER, ATTR_AUTOCONNECT),
{
vol.Optional(ATTR_HOST): cv.string,
vol.Optional(ATTR_FAILOVER): vol.In(FAILOVER_MODES),
vol.Optional(ATTR_AUTOCONNECT): vol.In(AUTOCONNECT_MODES),
},
)
)
CONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
DISCONNECT_LTE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST): cv.string})
@attr.s
class ModemData:
"""Class for modem state."""
hass = attr.ib()
host = attr.ib()
modem = attr.ib()
data = attr.ib(init=False, default=None)
connected = attr.ib(init=False, default=True)
async def async_update(self):
"""Call the API to update the data."""
try:
self.data = await self.modem.information()
if not self.connected:
_LOGGER.warning("Connected to %s", self.host)
self.connected = True
except eternalegypt.Error:
if self.connected:
_LOGGER.warning("Lost connection to %s", self.host)
self.connected = False
self.data = None
async_dispatcher_send(self.hass, DISPATCHER_NETGEAR_LTE)
@attr.s
class LTEData:
"""Shared state."""
websession = attr.ib()
modem_data = attr.ib(init=False, factory=dict)
def get_modem_data(self, config):
"""Get modem_data for the host in config."""
if config[CONF_HOST] is not None:
return self.modem_data.get(config[CONF_HOST])
if len(self.modem_data) != 1:
return None
return next(iter(self.modem_data.values()))
async def async_setup(hass, config):
"""Set up Netgear LTE component."""
if DATA_KEY not in hass.data:
websession = async_create_clientsession(
hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
)
hass.data[DATA_KEY] = LTEData(websession)
async def service_handler(service):
"""Apply a service."""
host = service.data.get(ATTR_HOST)
conf = {CONF_HOST: host}
modem_data = hass.data[DATA_KEY].get_modem_data(conf)
if not modem_data:
_LOGGER.error("%s: host %s unavailable", service.service, host)
return
if service.service == SERVICE_DELETE_SMS:
for sms_id in service.data[ATTR_SMS_ID]:
await modem_data.modem.delete_sms(sms_id)
elif service.service == SERVICE_SET_OPTION:
failover = service.data.get(ATTR_FAILOVER)
if failover:
await modem_data.modem.set_failover_mode(failover)
autoconnect = service.data.get(ATTR_AUTOCONNECT)
if autoconnect:
await modem_data.modem.set_autoconnect_mode(autoconnect)
elif service.service == SERVICE_CONNECT_LTE:
await modem_data.modem.connect_lte()
elif service.service == SERVICE_DISCONNECT_LTE:
await modem_data.modem.disconnect_lte()
service_schemas = {
SERVICE_DELETE_SMS: DELETE_SMS_SCHEMA,
SERVICE_SET_OPTION: SET_OPTION_SCHEMA,
SERVICE_CONNECT_LTE: CONNECT_LTE_SCHEMA,
SERVICE_DISCONNECT_LTE: DISCONNECT_LTE_SCHEMA,
}
for service, schema in service_schemas.items():
hass.services.async_register(
DOMAIN, service, service_handler, schema=schema
)
netgear_lte_config = config[DOMAIN]
# Set up each modem
tasks = [_setup_lte(hass, lte_conf) for lte_conf in netgear_lte_config]
await asyncio.wait(tasks)
# Load platforms for each modem
for lte_conf in netgear_lte_config:
# Notify
for notify_conf in lte_conf[NOTIFY_DOMAIN]:
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
CONF_NAME: notify_conf.get(CONF_NAME),
NOTIFY_DOMAIN: notify_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, NOTIFY_DOMAIN, DOMAIN, discovery_info, config
)
)
# Sensor
sensor_conf = lte_conf.get(SENSOR_DOMAIN)
discovery_info = {CONF_HOST: lte_conf[CONF_HOST], SENSOR_DOMAIN: sensor_conf}
hass.async_create_task(
discovery.async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
# Binary Sensor
binary_sensor_conf = lte_conf.get(BINARY_SENSOR_DOMAIN)
discovery_info = {
CONF_HOST: lte_conf[CONF_HOST],
BINARY_SENSOR_DOMAIN: binary_sensor_conf,
}
hass.async_create_task(
discovery.async_load_platform(
hass, BINARY_SENSOR_DOMAIN, DOMAIN, discovery_info, config
)
)
return True
async def _setup_lte(hass, lte_config):
"""Set up a Netgear LTE modem."""
host = lte_config[CONF_HOST]
password = lte_config[CONF_PASSWORD]
websession = hass.data[DATA_KEY].websession
modem = eternalegypt.Modem(hostname=host, websession=websession)
modem_data = ModemData(hass, host, modem)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password))
@callback
def cleanup_retry(event):
"""Clean up retry task resources."""
if not retry_task.done():
retry_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
"""Log in and complete setup."""
await modem_data.modem.login(password=password)
def fire_sms_event(sms):
"""Send an SMS event."""
data = {
ATTR_HOST: modem_data.host,
ATTR_SMS_ID: sms.id,
ATTR_FROM: sms.sender,
ATTR_MESSAGE: sms.message,
}
hass.bus.async_fire(EVENT_SMS, data)
await modem_data.modem.add_sms_listener(fire_sms_event)
await modem_data.async_update()
hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
async def cleanup(event):
"""Clean up resources."""
await modem_data.modem.logout()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _update(now):
"""Periodic update."""
await modem_data.async_update()
async_track_time_interval(hass, _update, SCAN_INTERVAL)
async def _retry_login(hass, modem_data, password):
"""Sleep and retry setup."""
_LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host)
modem_data.connected = False
delay = 15
while not modem_data.connected:
await asyncio.sleep(delay)
try:
await _login(hass, modem_data, password)
except eternalegypt.Error:
delay = min(2 * delay, 300)
@attr.s
class LTEEntity(Entity):
"""Base LTE entity."""
modem_data = attr.ib()
sensor_type = attr.ib()
_unique_id = attr.ib(init=False)
@_unique_id.default
def _init_unique_id(self):
"""Register unique_id while we know data is valid."""
return f"{self.sensor_type}_{self.modem_data.data.serial_number}"
async def async_added_to_hass(self):
"""Register callback."""
async_dispatcher_connect(
self.hass, DISPATCHER_NETGEAR_LTE, self.async_write_ha_state
)
async def async_update(self):
"""Force update of state."""
await self.modem_data.async_update()
@property
def should_poll(self):
"""Return that the sensor should not be polled."""
return False
@property
def available(self):
"""Return the availability of the sensor."""
return self.modem_data.data is not None
@property
def unique_id(self):
"""Return a unique ID like 'usage_5TG365AB0078V'."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"Netgear LTE {self.sensor_type}"
| apache-2.0 |
maurofaccenda/ansible | lib/ansible/modules/cloud/vmware/vca_nat.py | 49 | 6957 | #!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vca_nat
short_description: add remove nat rules in a gateway in a vca
description:
- Adds or removes nat rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
purge_rules:
description:
- If set to true, it will delete all rules in the gateway that are not given as paramter to this module.
required: false
default: false
nat_rules:
description:
- A list of rules to be added to the gateway, Please see examples on valid entries
required: True
default: false
extends_documentation_fragment: vca.documentation
'''
EXAMPLES = '''
#An example for a source nat
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: SNAT
original_ip: 192.0.2.42
translated_ip: 203.0.113.23
#example for a DNAT
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: DNAT
original_ip: 203.0.113.23
original_port: 22
translated_ip: 192.0.2.42
translated_port: 22
'''
import time
import xmltodict
VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
'translated_ip', 'translated_port', 'protocol']
def validate_nat_rules(nat_rules):
for rule in nat_rules:
if not isinstance(rule, dict):
raise VcaError("nat rules must be a list of dictionaries, "
"Please check", valid_keys=VALID_RULE_KEYS)
for k in rule.keys():
if k not in VALID_RULE_KEYS:
raise VcaError("%s is not a valid key in nat rules, please "
"check above.." % k, valid_keys=VALID_RULE_KEYS)
rule['original_port'] = str(rule.get('original_port', 'any')).lower()
rule['original_ip'] = rule.get('original_ip', 'any').lower()
rule['translated_ip'] = rule.get('translated_ip', 'any').lower()
rule['translated_port'] = str(rule.get('translated_port', 'any')).lower()
rule['protocol'] = rule.get('protocol', 'any').lower()
rule['rule_type'] = rule.get('rule_type', 'DNAT').lower()
return nat_rules
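# Editor's illustration (not in the original module; sample values are
# hypothetical): validate_nat_rules fills in defaults and lower-cases values:
#   validate_nat_rules([{'rule_type': 'SNAT', 'original_ip': '192.0.2.42'}])
#   => [{'rule_type': 'snat', 'original_ip': '192.0.2.42',
#        'original_port': 'any', 'translated_ip': 'any',
#        'translated_port': 'any', 'protocol': 'any'}]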
def nat_rules_to_dict(nat_rules):
result = []
for rule in nat_rules:
gw_rule = rule.get_GatewayNatRule()
result.append(
dict(
rule_type=rule.get_RuleType().lower(),
original_ip=gw_rule.get_OriginalIp().lower(),
original_port=(gw_rule.get_OriginalPort().lower() or 'any'),
translated_ip=gw_rule.get_TranslatedIp().lower(),
translated_port=(gw_rule.get_TranslatedPort().lower() or 'any'),
protocol=(gw_rule.get_Protocol().lower() or 'any')
)
)
return result
def rule_to_string(rule):
strings = list()
for key, value in rule.items():
strings.append('%s=%s' % (key, value))
    return ', '.join(strings)
def main():
argument_spec = vca_argument_spec()
argument_spec.update(
dict(
nat_rules = dict(type='list', default=[]),
gateway_name = dict(default='gateway'),
purge_rules = dict(default=False, type='bool'),
state = dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
vdc_name = module.params.get('vdc_name')
state = module.params['state']
nat_rules = module.params['nat_rules']
gateway_name = module.params['gateway_name']
purge_rules = module.params['purge_rules']
if not purge_rules and not nat_rules:
module.fail_json(msg='Must define purge_rules or nat_rules')
vca = vca_login(module)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check "
"the gateway_name param" % gateway_name)
try:
desired_rules = validate_nat_rules(nat_rules)
except VcaError as e:
module.fail_json(msg=e.message)
rules = gateway.get_nat_rules()
result = dict(changed=False, rules_purged=0)
deletions = 0
additions = 0
if purge_rules is True and len(rules) > 0:
result['rules_purged'] = len(rules)
deletions = result['rules_purged']
rules = list()
if not module.check_mode:
gateway.del_all_nat_rules()
task = gateway.save_services_configuration()
vca.block_until_completed(task)
rules = gateway.get_nat_rules()
result['changed'] = True
current_rules = nat_rules_to_dict(rules)
result['current_rules'] = current_rules
result['desired_rules'] = desired_rules
for rule in desired_rules:
if rule not in current_rules:
additions += 1
if not module.check_mode:
gateway.add_nat_rule(**rule)
result['changed'] = True
result['rules_added'] = additions
result['delete_rule'] = list()
result['delete_rule_rc'] = list()
for rule in current_rules:
if rule not in desired_rules:
deletions += 1
if not module.check_mode:
result['delete_rule'].append(rule)
rc = gateway.del_nat_rule(**rule)
result['delete_rule_rc'].append(rc)
result['changed'] = True
result['rules_deleted'] = deletions
if not module.check_mode and (additions > 0 or deletions > 0):
task = gateway.save_services_configuration()
vca.block_until_completed(task)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()
| gpl-3.0 |
jtyr/ansible | lib/ansible/utils/unicode.py | 158 | 1166 | # (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
__all__ = ('unicode_wrap')
def unicode_wrap(func, *args, **kwargs):
"""If a function returns a string, force it to be a text string.
Use with partial to ensure that filter plugins will return text values.
"""
return to_text(func(*args, **kwargs), nonstring='passthru')
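# Editor's sketch (not in the original file; the filter function is
# hypothetical): wrap a callable so its result is always text.
#   from functools import partial
#   shout = partial(unicode_wrap, lambda s: s.upper())
#   shout(b'abc')  # -> u'ABC' on both Python 2 and Python 3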
| gpl-3.0 |
Endika/connector-magento | magentoerpconnect/unit/delete_synchronizer.py | 11 | 1917 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.addons.connector.queue.job import job, related_action
from openerp.addons.connector.unit.synchronizer import Deleter
from ..connector import get_environment
from ..related_action import link
class MagentoDeleter(Deleter):
""" Base deleter for Magento """
def run(self, magento_id):
""" Run the synchronization, delete the record on Magento
:param magento_id: identifier of the record to delete
"""
self.backend_adapter.delete(magento_id)
return _('Record %s deleted on Magento') % magento_id
MagentoDeleteSynchronizer = MagentoDeleter # deprecated
@job(default_channel='root.magento')
@related_action(action=link)
def export_delete_record(session, model_name, backend_id, magento_id):
""" Delete a record on Magento """
env = get_environment(session, model_name, backend_id)
deleter = env.get_connector_unit(MagentoDeleter)
return deleter.run(magento_id)
| agpl-3.0 |
gxx/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/forms/tests/models.py | 50 | 9010 | # -*- coding: utf-8 -*-
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import Form, ModelForm, FileField, ModelChoiceField
from django.test import TestCase
from regressiontests.forms.models import ChoiceModel, ChoiceOptionModel, ChoiceFieldModel, FileModel, Group, BoundaryModel, Defaults
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
class FileForm(Form):
file1 = FileField()
class TestTicket12510(TestCase):
''' It is not necessary to generate choices for ModelChoiceField (regression test for #12510). '''
def setUp(self):
self.groups = [Group.objects.create(name=name) for name in 'abc']
def test_choices_not_fetched_when_not_rendering(self):
def test():
field = ModelChoiceField(Group.objects.order_by('-name'))
self.assertEqual('a', field.clean(self.groups[0].pk).name)
# only one query is required to pull the model from DB
self.assertNumQueries(1, test)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
option = ChoiceOptionModel.objects.create(name='default')
choices = list(ChoiceFieldForm().fields['choice'].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, unicode(option)))
def test_callable_initial_value(self):
"The initial value for a callable default returning a queryset is the pk (refs #13769)"
obj1 = ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertEqual(ChoiceFieldForm().as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>""")
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
obj1 = ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertEqual(ChoiceFieldForm(initial={
'choice': obj2,
'choice_int': obj2,
'multi_choice': [obj2,obj3],
'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
}).as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>""")
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with unicode filename and data #########################
f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह')}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertTrue('file1' in f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data['file1'])
self.assertEqual(m.file.name, u'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
m.delete()
def test_boundary_conditions(self):
        # Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
f = BoundaryForm({'positive_integer': 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# Formfield initial values ########
# If the model has default values for some fields, they are used as the formfield
# initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
self.assertEqual(DefaultsForm().fields['name'].initial, u'class default value')
self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
self.assertEqual(DefaultsForm().fields['value'].initial, 42)
r1 = DefaultsForm()['callable_default'].as_widget()
r2 = DefaultsForm()['callable_default'].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(name=u'instance value', def_date=datetime.date(1969, 4, 4), value=12)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial['name'], u'instance value')
self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial['value'], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ['name', 'callable_default']
f = ExcludingForm({'name': u'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], u'Hello')
obj = f.save()
self.assertEqual(obj.name, u'class default value')
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
| gpl-3.0 |
cogeorg/BlackRhino | networkx/algorithms/flow/utils.py | 23 | 5880 | # -*- coding: utf-8 -*-
"""
Utility classes and functions for network flow algorithms.
"""
__author__ = """ysitu <ysitu@users.noreply.github.com>"""
# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>
# All rights reserved.
# BSD license.
from collections import deque
import networkx as nx
__all__ = ['CurrentEdge', 'Level', 'GlobalRelabelThreshold',
'build_residual_network', 'detect_unboundedness', 'build_flow_dict']
class CurrentEdge(object):
"""Mechanism for iterating over out-edges incident to a node in a circular
manner. StopIteration exception is raised when wraparound occurs.
"""
__slots__ = ('_edges', '_it', '_curr')
def __init__(self, edges):
self._edges = edges
if self._edges:
self._rewind()
def get(self):
return self._curr
def move_to_next(self):
try:
self._curr = next(self._it)
except StopIteration:
self._rewind()
raise
def _rewind(self):
self._it = iter(self._edges.items())
self._curr = next(self._it)
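# Editor's illustration (not in the original module; the edge dict is
# hypothetical): advancing past the last edge rewinds and raises StopIteration
# to signal wraparound.
#   ce = CurrentEdge({'a': {}, 'b': {}})
#   ce.get()           # -> ('a', {})  (first item; insertion order on CPython 3.7+)
#   ce.move_to_next()  # now at ('b', {})
#   ce.move_to_next()  # raises StopIteration; current edge is rewound to ('a', {})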
class Level(object):
"""Active and inactive nodes in a level.
"""
__slots__ = ('active', 'inactive')
def __init__(self):
self.active = set()
self.inactive = set()
class GlobalRelabelThreshold(object):
"""Measurement of work before the global relabeling heuristic should be
applied.
"""
def __init__(self, n, m, freq):
self._threshold = (n + m) / freq if freq else float('inf')
self._work = 0
def add_work(self, work):
self._work += work
def is_reached(self):
return self._work >= self._threshold
def clear_work(self):
self._work = 0
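# Editor's illustration (hypothetical numbers): with n=4 nodes, m=5 edges and
# freq=0.5 the threshold is (4 + 5) / 0.5 = 18 units of work.
#   grt = GlobalRelabelThreshold(4, 5, 0.5)
#   grt.add_work(18)
#   grt.is_reached()  # -> True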
def build_residual_network(G, capacity):
"""Build a residual network and initialize a zero flow.
The residual network :samp:`R` from an input graph :samp:`G` has the
same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
in :samp:`G`.
For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
in :samp:`G` or zero otherwise. If the capacity is infinite,
:samp:`R[u][v]['capacity']` will have a high arbitrary finite value
that does not affect the solution of the problem. This value is stored in
:samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
:samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
The flow value, defined as the total flow into :samp:`t`, the sink, is
stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not
specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such
that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
:samp:`s`-:samp:`t` cut.
"""
if G.is_multigraph():
raise nx.NetworkXError(
'MultiGraph and MultiDiGraph not supported (yet).')
R = nx.DiGraph()
R.add_nodes_from(G)
inf = float('inf')
# Extract edges with positive capacities. Self loops excluded.
edge_list = [(u, v, attr) for u, v, attr in G.edges(data=True)
if u != v and attr.get(capacity, inf) > 0]
# Simulate infinity with three times the sum of the finite edge capacities
# or any positive value if the sum is zero. This allows the
# infinite-capacity edges to be distinguished for unboundedness detection
# and directly participate in residual capacity calculation. If the maximum
# flow is finite, these edges cannot appear in the minimum cut and thus
# guarantee correctness. Since the residual capacity of an
    # infinite-capacity edge is always at least 2/3 of inf, while that of a
# finite-capacity edge is at most 1/3 of inf, if an operation moves more
# than 1/3 of inf units of flow to t, there must be an infinite-capacity
# s-t path in G.
inf = 3 * sum(attr[capacity] for u, v, attr in edge_list
if capacity in attr and attr[capacity] != inf) or 1
if G.is_directed():
for u, v, attr in edge_list:
r = min(attr.get(capacity, inf), inf)
if not R.has_edge(u, v):
# Both (u, v) and (v, u) must be present in the residual
# network.
R.add_edge(u, v, capacity=r)
R.add_edge(v, u, capacity=0)
else:
# The edge (u, v) was added when (v, u) was visited.
R[u][v]['capacity'] = r
else:
for u, v, attr in edge_list:
# Add a pair of edges with equal residual capacities.
r = min(attr.get(capacity, inf), inf)
R.add_edge(u, v, capacity=r)
R.add_edge(v, u, capacity=r)
# Record the value simulating infinity.
R.graph['inf'] = inf
return R
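# Editor's illustration (assumed two-edge graph): the simulated-infinity value
# is three times the sum of the finite edge capacities.
#   G = nx.DiGraph()
#   G.add_edge('s', 'a', capacity=3)
#   G.add_edge('a', 't', capacity=2)
#   R = build_residual_network(G, 'capacity')
#   R.graph['inf']           # -> 15 (3 * (3 + 2))
#   R['a']['s']['capacity']  # -> 0 (reverse edge added automatically)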
def detect_unboundedness(R, s, t):
"""Detect an infinite-capacity s-t path in R.
"""
q = deque([s])
seen = set([s])
inf = R.graph['inf']
while q:
u = q.popleft()
for v, attr in R[u].items():
if attr['capacity'] == inf and v not in seen:
if v == t:
raise nx.NetworkXUnbounded(
'Infinite capacity path, flow unbounded above.')
seen.add(v)
q.append(v)
def build_flow_dict(G, R):
"""Build a flow dictionary from a residual network.
"""
flow_dict = {}
for u in G:
flow_dict[u] = dict((v, 0) for v in G[u])
flow_dict[u].update((v, attr['flow']) for v, attr in R[u].items()
if attr['flow'] > 0)
return flow_dict
| gpl-3.0 |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/django/contrib/sessions/backends/signed_cookies.py | 288 | 2798 | from django.conf import settings
from django.core import signing
from django.contrib.sessions.backends.base import SessionBase
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies')
except (signing.BadSignature, ValueError):
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return signing.dumps(session_cache, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer)
@classmethod
def clear_expired(cls):
pass
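# Editor's note (illustrative; requires configured Django settings): with this
# backend the "session key" itself is the signed payload rather than a lookup
# key, roughly '<base64 data>:<timestamp>:<signature>'.
#   store = SessionStore()
#   store['flavor'] = 'vanilla'
#   store.save()
#   store.session_key  # -> signed cookie value, not a database identifier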
| mit |
OptiPop/external_chromium_org | tools/android/memdump/memsymbols.py | 170 | 4857 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import os
import sys
import re
from optparse import OptionParser
"""Extracts the list of resident symbols of a library loaded in a process.
This scripts combines the extended output of memdump for a given process
(obtained through memdump -x PID) and the symbol table of a .so loaded in that
process (obtained through nm -C lib-with-symbols.so), filtering out only those
symbols that, at the time of the snapshot, were resident in memory (that is,
the symbols whose start address belongs to a mapped page of the .so that was
resident at the time of the snapshot).
The aim is to perform a "code coverage"-like profiling of a binary, intersecting
run-time information (list of resident pages) and debug symbols.
"""
_PAGE_SIZE = 4096
def _TestBit(word, bit):
assert(bit >= 0 and bit < 8)
return not not ((word >> bit) & 1)
def _HexAddr(addr):
return hex(addr)[2:].zfill(8)
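# Editor's illustration (not in the original script):
#   _TestBit(0b00000101, 0)  # -> True
#   _TestBit(0b00000101, 1)  # -> False
#   _HexAddr(4096)           # -> '00001000'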
def _GetResidentPagesSet(memdump_contents, lib_name, verbose):
"""Parses the memdump output and extracts the resident page set for lib_name.
Args:
memdump_contents: Array of strings (lines) of a memdump output.
lib_name: A string containing the name of the library.so to be matched.
verbose: Print a verbose header for each mapping matched.
Returns:
A set of resident pages (the key is the page index) for all the
mappings matching .*lib_name.
"""
resident_pages = set()
MAP_RX = re.compile(
r'^([0-9a-f]+)-([0-9a-f]+) ([\w-]+) ([0-9a-f]+) .* "(.*)" \[(.*)\]$')
for line in memdump_contents:
line = line.rstrip('\r\n')
if line.startswith('[ PID'):
continue
r = MAP_RX.match(line)
if not r:
      sys.stderr.write('Skipping unparsable memdump line: %s\n' % line)
continue
map_start = int(r.group(1), 16)
map_end = int(r.group(2), 16)
prot = r.group(3)
offset = int(r.group(4), 16)
assert(offset % _PAGE_SIZE == 0)
lib = r.group(5)
enc_bitmap = r.group(6)
if not lib.endswith(lib_name):
continue
bitmap = base64.b64decode(enc_bitmap)
map_pages_count = (map_end - map_start + 1) / _PAGE_SIZE
bitmap_pages_count = len(bitmap) * 8
if verbose:
print 'Found %s: mapped %d pages in mode %s @ offset %s.' % (
lib, map_pages_count, prot, _HexAddr(offset))
print ' Map range in the process VA: [%s - %s]. Len: %s' % (
_HexAddr(map_start),
_HexAddr(map_end),
_HexAddr(map_pages_count * _PAGE_SIZE))
print ' Corresponding addresses in the binary: [%s - %s]. Len: %s' % (
_HexAddr(offset),
_HexAddr(offset + map_end - map_start),
_HexAddr(map_pages_count * _PAGE_SIZE))
print ' Bitmap: %d pages' % bitmap_pages_count
print ''
assert(bitmap_pages_count >= map_pages_count)
for i in xrange(map_pages_count):
bitmap_idx = i / 8
bitmap_off = i % 8
if (bitmap_idx < len(bitmap) and
_TestBit(ord(bitmap[bitmap_idx]), bitmap_off)):
resident_pages.add(offset / _PAGE_SIZE + i)
return resident_pages
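# Editor's sketch of the bitmap decoding above (hypothetical one-byte bitmap):
# base64 'AQ==' decodes to '\x01', so only the first page of the mapping is
# resident.
#   bitmap = base64.b64decode('AQ==')
#   _TestBit(ord(bitmap[0]), 0)  # -> True  (page 0 resident)
#   _TestBit(ord(bitmap[0]), 1)  # -> False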
def main(argv):
NM_RX = re.compile(r'^([0-9a-f]+)\s+.*$')
parser = OptionParser()
parser.add_option("-r", "--reverse",
action="store_true", dest="reverse", default=False,
help="Print out non present symbols")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="Print out verbose debug information.")
(options, args) = parser.parse_args()
if len(args) != 3:
print 'Usage: %s [-v] memdump.file nm.file library.so' % (
os.path.basename(argv[0]))
return 1
memdump_file = args[0]
nm_file = args[1]
lib_name = args[2]
if memdump_file == '-':
memdump_contents = sys.stdin.readlines()
else:
memdump_contents = open(memdump_file, 'r').readlines()
resident_pages = _GetResidentPagesSet(memdump_contents,
lib_name,
options.verbose)
# Process the nm symbol table, filtering out the resident symbols.
nm_fh = open(nm_file, 'r')
for line in nm_fh:
line = line.rstrip('\r\n')
# Skip undefined symbols (lines with no address).
if line.startswith(' '):
continue
r = NM_RX.match(line)
if not r:
sys.stderr.write('Skipping %s from %s\n' % (line, nm_file))
continue
sym_addr = int(r.group(1), 16)
sym_page = sym_addr / _PAGE_SIZE
last_sym_matched = (sym_page in resident_pages)
if (sym_page in resident_pages) != options.reverse:
print line
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
OptiPop/external_chromium_org | tools/memory_inspector/memory_inspector/core/stacktrace.py | 89 | 2438 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from memory_inspector.core import symbol
class Stacktrace(object):
"""Models a stack-trace, which is a sequence of stack |Frame|s."""
def __init__(self):
self.frames = []
def Add(self, frame):
assert(isinstance(frame, Frame))
self.frames += [frame]
@property
def depth(self):
return len(self.frames)
def __getitem__(self, index):
return self.frames[index]
def __str__(self):
return ', '.join([str(x) for x in self.frames])
class Frame(object):
"""Models a stack frame in a |Stacktrace|. It might be symbolized or not."""
def __init__(self, address):
"""
Args:
address: the absolute (virtual) address of the stack frame in the
original process virtual address space.
"""
assert(isinstance(address, (long, int)))
self.address = address
self.symbol = None
self.exec_file_rel_path = None
# Offset is the displacement inside the executable file, calculated as:
# self.address - mapping_address_of_the_so + mapping_offset_of_the_exec.
self.offset = None
def SetExecFileInfo(self, exec_file_rel_path, offset):
"""Sets the base file + offset information required for symbolization.
Args:
exec_file_rel_path: the path of the mapped executable (binary or lib)
relative to the target device (e.g., /system/lib/libc.so).
offset: the offset in the executable.
"""
assert(isinstance(offset, (long, int)))
self.exec_file_rel_path = exec_file_rel_path
self.offset = offset
def SetSymbolInfo(self, sym):
"""Sets the symbolization information."""
assert(isinstance(sym, symbol.Symbol))
assert(not self.symbol)
self.symbol = sym
@property
def exec_file_name(self):
"""Returns the file name (stripped of the path) of the executable."""
if not self.exec_file_rel_path:
return None
return posixpath.basename(self.exec_file_rel_path.replace('\\', '/'))
@property
def raw_address(self):
if self.exec_file_rel_path:
return '%s +0x%x' % (self.exec_file_name, self.offset)
else:
return '0x%x' % self.address
def __str__(self):
if self.symbol:
return str(self.symbol)
elif self.exec_file_rel_path:
      return self.raw_address
| bsd-3-clause |
yfdyh000/pontoon | pontoon/sync/formats/__init__.py | 2 | 1325 | """
Parsing resource files.
See base.py for the ParsedResource base class.
"""
import os.path
from pontoon.sync.formats import lang, po, silme, xliff
# To add support for a new resource format, add an entry to this dict
# where the key is the extension you're parsing and the value is a
# callable returning an instance of a ParsedResource subclass.
SUPPORTED_FORMAT_PARSERS = {
'.lang': lang.parse,
'.po': po.parse,
'.pot': po.parse,
'.xliff': xliff.parse,
'.dtd': silme.parse_dtd,
'.properties': silme.parse_properties,
'.ini': silme.parse_ini,
'.inc': silme.parse_inc,
}
def parse(path, source_path=None):
"""
Parse the resource file at the given path and return a
ParsedResource with its translations.
:param path:
Path to the resource file to parse.
:param source_path:
Path to the corresponding resource file in the source directory
for the resource we're parsing. Asymmetric formats need this
for saving. Defaults to None.
"""
root, extension = os.path.splitext(path)
if extension in SUPPORTED_FORMAT_PARSERS:
return SUPPORTED_FORMAT_PARSERS[extension](path, source_path=source_path)
else:
raise ValueError('Translation format {0} is not supported.'
.format(extension))
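# Editor's illustration (hypothetical path): a '.po' extension dispatches to
# po.parse and returns its ParsedResource.
#   resource = parse('locale/fr/LC_MESSAGES/messages.po')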
| bsd-3-clause |
tuxfux-hlp-notes/python-batches | batch-67/19-files/myenv/lib/python2.7/site-packages/setuptools/monkey.py | 39 | 5789 | """
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the bases classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
def get_unpatched(item):
lookup = (
get_unpatched_class if isinstance(item, six.class_types) else
get_unpatched_function if isinstance(item, types.FunctionType) else
lambda item: None
)
return lookup(item)
def get_unpatched_class(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
external_bases = (
cls
for cls in _get_mro(cls)
if not cls.__module__.startswith('setuptools')
)
base = next(external_bases)
if not base.__module__.startswith('distutils'):
msg = "distutils has already been patched by %r" % cls
raise AssertionError(msg)
return base
def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
has_issue_12885 = sys.version_info <= (3, 5, 3)
if has_issue_12885:
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = setuptools.findall
needs_warehouse = (
sys.version_info < (2, 7, 13)
or
(3, 0) < sys.version_info < (3, 3, 7)
or
(3, 4) < sys.version_info < (3, 4, 6)
or
(3, 5) < sys.version_info <= (3, 5, 3)
)
if needs_warehouse:
warehouse = 'https://upload.pypi.org/legacy/'
distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
_patch_distribution_metadata_write_pkg_file()
_patch_distribution_metadata_write_pkg_info()
# Install Distribution throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = setuptools.dist.Distribution
# Install the patched Extension
distutils.core.Extension = setuptools.extension.Extension
distutils.extension.Extension = setuptools.extension.Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = (
setuptools.extension.Extension
)
patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata_write_pkg_file():
"""Patch write_pkg_file to also write Requires-Python/Requires-External"""
distutils.dist.DistributionMetadata.write_pkg_file = (
setuptools.dist.write_pkg_file
)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
distutils.dist.DistributionMetadata.write_pkg_info = (
setuptools.dist.write_pkg_info
)
def patch_func(replacement, target_mod, func_name):
"""
Patch func_name in target_mod with replacement
Important - original must be resolved by name to avoid
patching an already patched function.
"""
original = getattr(target_mod, func_name)
# set the 'unpatched' attribute on the replacement to
# point to the original.
vars(replacement).setdefault('unpatched', original)
# replace the function in the original module
setattr(target_mod, func_name, replacement)
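# Editor's illustration (not in the original module): patch a distutils
# function and recover the original through the 'unpatched' attribute.
#   patch_func(setuptools.findall, distutils.filelist, 'findall')
#   original = get_unpatched_function(distutils.filelist.findall)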
def get_unpatched_function(candidate):
return getattr(candidate, 'unpatched')
def patch_for_msvc_specialized_compiler():
"""
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
# import late to avoid circular imports on Python < 3.5
msvc = import_module('setuptools.msvc')
if platform.system() != 'Windows':
        # Compilers are only available on Microsoft Windows
return
def patch_params(mod_name, func_name):
"""
Prepare the parameters for patch_func to patch indicated function.
"""
repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
repl_name = repl_prefix + func_name.lstrip('_')
repl = getattr(msvc, repl_name)
mod = import_module(mod_name)
if not hasattr(mod, func_name):
raise ImportError(func_name)
return repl, mod, func_name
# Python 2.7 to 3.4
msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
# Python 3.5+
msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
try:
# Patch distutils.msvc9compiler
patch_func(*msvc9('find_vcvarsall'))
patch_func(*msvc9('query_vcvarsall'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler._get_vc_env
patch_func(*msvc14('_get_vc_env'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler.gen_lib_options for Numpy
patch_func(*msvc14('gen_lib_options'))
except ImportError:
pass
| gpl-3.0 |
konstruktoid/ansible-upstream | lib/ansible/modules/cloud/docker/docker_secret.py | 16 | 8297 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_secret
short_description: Manage docker secrets.
version_added: "2.4"
description:
- Create and remove Docker secrets in a Swarm environment. Similar to `docker secret create` and `docker secret rm`.
      - Adds to the metadata of new secrets 'ansible_key', an encrypted hash representation of the data, which is then
        used in future runs to test if a secret has changed.
      - If 'ansible_key' is not present, then a secret will not be updated unless the C(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
options:
data:
description:
- String. The value of the secret. Required when state is C(present).
required: false
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
required: false
force:
description:
- Use with state C(present) to always remove and recreate an existing secret.
- If I(true), an existing secret will be replaced, even if it has not changed.
default: false
type: bool
name:
description:
- The name of the secret.
required: true
state:
description:
- Set to C(present), if the secret should exist, and C(absent), if it should not.
required: false
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
requirements:
- "docker-py >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Create secret foo
docker_secret:
name: foo
data: Hello World!
state: present
- name: Change the secret data
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
docker_secret:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove secret foo
docker_secret:
name: foo
state: absent
'''
RETURN = '''
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success
type: string
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
import hashlib
try:
from docker.errors import APIError
except ImportError:
# missing docker-py handled in ansible.module_utils.docker
pass
from ansible.module_utils.docker_common import AnsibleDockerClient, DockerBaseClass
from ansible.module_utils._text import to_native, to_bytes
class SecretManager(DockerBaseClass):
def __init__(self, client, results):
super(SecretManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
parameters = self.client.module.params
self.name = parameters.get('name')
self.state = parameters.get('state')
self.data = parameters.get('data')
self.labels = parameters.get('labels')
self.force = parameters.get('force')
self.data_key = None
def __call__(self):
if self.state == 'present':
self.data_key = hashlib.sha224(to_bytes(self.data)).hexdigest()
self.present()
elif self.state == 'absent':
self.absent()
def get_secret(self):
''' Find an existing secret. '''
try:
secrets = self.client.secrets(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
for secret in secrets:
if secret['Spec']['Name'] == self.name:
return secret
return None
def create_secret(self):
''' Create a new secret '''
secret_id = None
        # We can't see the data after creation, so add a label we can use for an idempotency check
labels = {
'ansible_key': self.data_key
}
if self.labels:
labels.update(self.labels)
try:
if not self.check_mode:
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
except APIError as exc:
self.client.fail("Error creating secret: %s" % to_native(exc))
if isinstance(secret_id, dict):
secret_id = secret_id['ID']
return secret_id
def present(self):
''' Handles state == 'present', creating or updating the secret '''
secret = self.get_secret()
if secret:
self.results['secret_id'] = secret['ID']
data_changed = False
attrs = secret.get('Spec', {})
if attrs.get('Labels', {}).get('ansible_key'):
if attrs['Labels']['ansible_key'] != self.data_key:
data_changed = True
labels_changed = False
if self.labels and attrs.get('Labels'):
# check if user requested a label change
for label in attrs['Labels']:
if self.labels.get(label) and self.labels[label] != attrs['Labels'][label]:
labels_changed = True
# check if user added a label
labels_added = False
if self.labels:
if attrs.get('Labels'):
for label in self.labels:
if label not in attrs['Labels']:
labels_added = True
else:
labels_added = True
if data_changed or labels_added or labels_changed or self.force:
# if something changed or force, delete and re-create the secret
self.absent()
secret_id = self.create_secret()
self.results['changed'] = True
self.results['secret_id'] = secret_id
else:
self.results['changed'] = True
self.results['secret_id'] = self.create_secret()
def absent(self):
''' Handles state == 'absent', removing the secret '''
secret = self.get_secret()
if secret:
try:
if not self.check_mode:
self.client.remove_secret(secret['ID'])
except APIError as exc:
self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
self.results['changed'] = True
def main():
argument_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['absent', 'present'], default='present'),
data=dict(type='str', no_log=True),
labels=dict(type='dict'),
force=dict(type='bool', default=False)
)
required_if = [
('state', 'present', ['data'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if
)
results = dict(
changed=False,
secret_id=''
)
SecretManager(client, results)()
client.module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
clobrano/personfinder | app/create.py | 8 | 11669 | #!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from model import *
from photo import create_photo, PhotoError
from utils import *
from detect_spam import SpamDetector
import simplejson
from django.utils.translation import ugettext as _
def validate_date(string):
"""Parses a date in YYYY-MM-DD format. This is a special case for manual
entry of the source_date in the creation form. Unlike the validators in
utils.py, this will throw an exception if the input is badly formatted."""
year, month, day = map(int, string.strip().split('-'))
return datetime(year, month, day)
def days_to_date(days):
"""Converts a duration signifying days-from-now to a datetime object.
Returns:
None if days is None, else now + days (in utc)"""
return days and get_utcnow() + timedelta(days=days)
class Handler(BaseHandler):
def get(self):
self.params.create_mode = True
profile_websites = [add_profile_icon_url(website, self)
for website in self.config.profile_websites or []]
self.render('create.html',
profile_websites=profile_websites,
profile_websites_json=simplejson.dumps(profile_websites),
onload_function='view_page_loaded()')
def post(self):
now = get_utcnow()
# Several messages here exceed the 80-column limit because django's
# makemessages script can't handle messages split across lines. :(
if self.config.use_family_name:
if not (self.params.given_name and self.params.family_name):
return self.error(400, _('The Given name and Family name are both required. Please go back and try again.'))
else:
if not self.params.given_name:
return self.error(400, _('Name is required. Please go back and try again.'))
if not self.params.author_name:
if self.params.clone:
return self.error(400, _('The Original author\'s name is required. Please go back and try again.'))
else:
return self.error(400, _('Your name is required in the "Source" section. Please go back and try again.'))
if self.params.add_note:
if not self.params.text:
return self.error(400, _('Message is required. Please go back and try again.'))
if self.params.status == 'is_note_author' and \
not self.params.author_made_contact:
return self.error(400, _('Please check that you have been in contact with the person after the earthquake, or change the "Status of this person" field.'))
if (self.params.status == 'believed_dead' and \
not self.config.allow_believed_dead_via_ui):
return self.error(400, _('Not authorized to post notes with the status "I have received information that this person is dead".'))
source_date = None
if self.params.source_date:
try:
source_date = validate_date(self.params.source_date)
except ValueError:
return self.error(400, _('Original posting date is not in YYYY-MM-DD format, or is a nonexistent date. Please go back and try again.'))
if source_date > now:
return self.error(400, _('Date cannot be in the future. Please go back and try again.'))
expiry_date = days_to_date(self.params.expiry_option or
self.config.default_expiry_days)
# If nothing was uploaded, just use the photo_url that was provided.
photo, photo_url = (None, self.params.photo_url)
note_photo, note_photo_url = (None, self.params.note_photo_url)
try:
# If a photo was uploaded, create a Photo entry and get the URL
# where we serve it.
if self.params.photo is not None:
photo, photo_url = create_photo(self.params.photo, self)
if self.params.note_photo is not None:
note_photo, note_photo_url = \
create_photo(self.params.note_photo, self)
except PhotoError, e:
return self.error(400, e.message)
# Finally, store the Photo. Past this point, we should NOT self.error.
if photo:
photo.put()
if note_photo:
note_photo.put()
profile_urls = []
if self.params.profile_url1:
profile_urls.append(self.params.profile_url1)
if self.params.profile_url2:
profile_urls.append(self.params.profile_url2)
if self.params.profile_url3:
profile_urls.append(self.params.profile_url3)
# Person records have to have a source_date; if none entered, use now.
source_date = source_date or now
# Determine the source name, or fill it in if the record is original
# (i.e. created for the first time here, not copied from elsewhere).
source_name = self.params.source_name
if not self.params.clone:
# record originated here
if self.params.referrer:
source_name = "%s (referred by %s)" % (self.env.netloc,
self.params.referrer)
else:
source_name = self.env.netloc
person = Person.create_original(
self.repo,
entry_date=now,
expiry_date=expiry_date,
given_name=self.params.given_name,
family_name=self.params.family_name,
full_name=get_full_name(self.params.given_name,
self.params.family_name,
self.config),
alternate_names=get_full_name(self.params.alternate_given_names,
self.params.alternate_family_names,
self.config),
description=self.params.description,
sex=self.params.sex,
date_of_birth=self.params.date_of_birth,
age=self.params.age,
home_street=self.params.home_street,
home_city=self.params.home_city,
home_state=self.params.home_state,
home_postal_code=self.params.home_postal_code,
home_neighborhood=self.params.home_neighborhood,
home_country=self.params.home_country,
profile_urls='\n'.join(profile_urls),
author_name=self.params.author_name,
author_phone=self.params.author_phone,
author_email=self.params.author_email,
source_url=self.params.source_url,
source_date=source_date,
source_name=source_name,
photo=photo,
photo_url=photo_url
)
person.update_index(['old', 'new'])
if self.params.add_note:
spam_detector = SpamDetector(self.config.bad_words)
spam_score = spam_detector.estimate_spam_score(self.params.text)
if (spam_score > 0):
note = NoteWithBadWords.create_original(
self.repo,
entry_date=get_utcnow(),
person_record_id=person.record_id,
author_name=self.params.author_name,
author_email=self.params.author_email,
author_phone=self.params.author_phone,
source_date=source_date,
author_made_contact=bool(self.params.author_made_contact),
status=self.params.status,
email_of_found_person=self.params.email_of_found_person,
phone_of_found_person=self.params.phone_of_found_person,
last_known_location=self.params.last_known_location,
text=self.params.text,
photo=note_photo,
photo_url=note_photo_url,
spam_score=spam_score,
confirmed=False)
# Write the new NoteWithBadWords to the datastore
db.put(note)
UserActionLog.put_new('add', note, copy_properties=False)
# Write the person record to datastore before redirect
db.put(person)
UserActionLog.put_new('add', person, copy_properties=False)
# When the note is detected as spam, we do not update person
# record with this note or log action. We ask the note author
# for confirmation first.
return self.redirect('/post_flagged_note',
id=note.get_record_id(),
author_email=note.author_email,
repo=self.repo)
else:
note = Note.create_original(
self.repo,
entry_date=get_utcnow(),
person_record_id=person.record_id,
author_name=self.params.author_name,
author_email=self.params.author_email,
author_phone=self.params.author_phone,
source_date=source_date,
author_made_contact=bool(self.params.author_made_contact),
status=self.params.status,
email_of_found_person=self.params.email_of_found_person,
phone_of_found_person=self.params.phone_of_found_person,
last_known_location=self.params.last_known_location,
text=self.params.text,
photo=note_photo,
photo_url=note_photo_url)
# Write the new Note to the datastore
db.put(note)
UserActionLog.put_new('add', note, copy_properties=False)
person.update_from_note(note)
# Specially log 'believed_dead'.
if note.status == 'believed_dead':
UserActionLog.put_new(
'mark_dead', note, person.primary_full_name,
self.request.remote_addr)
# Write the person record to datastore
db.put(person)
UserActionLog.put_new('add', person, copy_properties=False)
        # TODO(ryok): we could do this earlier so we don't need to db.put twice.
if not person.source_url and not self.params.clone:
# Put again with the URL, now that we have a person_record_id.
person.source_url = self.get_url('/view', id=person.record_id)
db.put(person)
# TODO(ryok): batch-put person, note, photo, note_photo here.
# If user wants to subscribe to updates, redirect to the subscribe page
if self.params.subscribe:
return self.redirect('/subscribe',
id=person.record_id,
subscribe_email=self.params.author_email,
context='create_person')
self.redirect('/view', id=person.record_id)
| apache-2.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/setuptools/package_index.py | 95 | 39490 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
from functools import wraps
try:
from urllib.parse import splituser
except ImportError:
from urllib2 import splituser
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py',-16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py',-20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base,py_ver,plat
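# Editor's illustration (hypothetical filenames):
#   parse_bdist_wininst('foo-1.0.win32.exe')        # -> ('foo-1.0', None, 'win32')
#   parse_bdist_wininst('foo-1.0.win32-py2.7.exe')  # -> ('foo-1.0', '2.7', 'win32')
#   parse_bdist_wininst('foo-1.0.tar.gz')           # -> (None, None, None)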
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
scheme, server, path, parameters, query, fragment = parts
base = urllib.parse.unquote(path.split('/')[-1])
if server=='sourceforge.net' and base=='download': # XXX Yuck
base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base: base, fragment = base.split('#',1)
return base,fragment
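# Editor's illustration (hypothetical URL):
#   egg_info_for_url('https://example.com/dl/Foo-1.0.tar.gz#egg=Foo')
#   # -> ('Foo-1.0.tar.gz', 'egg=Foo')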
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata): yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence = CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to the name/version split,
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1,len(parts)+1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence = precedence,
platform = platform
)
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
user_agent = "Python-urllib/%s setuptools/%s" % (
sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
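# Illustrative sketch (hypothetical URL, added for clarity):
#
#   >>> checker = HashChecker.from_url(
#   ...     'http://example.com/Foo-1.0.zip#md5=d41d8cd98f00b204e9800998ecf8427e')
#   >>> checker.hash_name
#   'md5'
#
# A URL without a recognized ``name=hexdigest`` fragment falls back to the
# null ContentChecker defined above.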
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.python.org/simple", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self,*args,**kw)
        # append a trailing slash to index_url only if one is missing
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate,hosts))).match
self.to_scan = []
if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
self.opener = ssl_support.opener_for(ca_bundle)
else: self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
if (s and s.group(1).lower()=='file') or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = ("\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/1dg9ijs for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts)==2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url+='#egg=%s-%s' % (pkg,ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg,*args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name+'/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key,())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?" % (
checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found,fragment,tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=self.download(dist.location, tmpdir))
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists)==1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename=dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment,dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, info = None, None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(strip_fragment(url))
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename,'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp: fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except http_client.HTTPException as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme=='git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme=='file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/',1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':',1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username="+auth
netloc = host
            parts = scheme, netloc, path, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#',1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
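    # Illustrative sketch (hypothetical URL, added for clarity):
    #
    #   >>> PackageIndex._vcs_split_rev_from_url(
    #   ...     'git+https://example.com/repo@v1.0#egg=foo')
    #   ('https://example.com/repo', 'v1.0')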
def _download_git(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("(cd %s && git checkout --quiet %s)" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("(cd %s && hg up -C -r %s >&-)" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
if not isinstance(c, int):
return c
if c>255: return six.unichr(c)
return chr(c)
def decode_entity(match):
what = match.group(1)
if what.startswith('#x'):
what = int(what[2:], 16)
elif what.startswith('#'):
what = int(what[1:])
else:
what = six.moves.html_entities.name2codepoint.get(what, match.group(0))
return uchr(what)
def htmldecode(text):
"""Decode HTML entities in the given text."""
return entity_sub(decode_entity, text)
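# Illustrative examples (added; behavior follows from decode_entity above):
#
#   >>> htmldecode('a &amp; b')
#   'a & b'
#   >>> htmldecode('&#65;&#x42;')
#   'AB'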
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
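# Illustrative note (added): socket_timeout is a decorator factory; it is
# applied to open_with_auth near the bottom of this module, roughly as
#
#   open_with_auth = socket_timeout(15)(open_with_auth)
#
# so every call runs with a bounded default socket timeout.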
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
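# Illustrative usage (added; the credentials are hypothetical):
#
#   >>> user, pw = Credential('alice', 's3cret')
#   >>> str(Credential('alice', 's3cret'))
#   'alice:s3cret'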
class PyPIConfig(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
configparser.RawConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http_client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)' % info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, host, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2==scheme and h2==host:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = ("<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
| mit |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/unittest/test/test_case.py | 89 | 43816 | import difflib
import pprint
import pickle
import re
import sys
from copy import deepcopy
from test import test_support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult, ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
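# Note (added for clarity): driving a LoggingTestCase with a LoggingResult
# records an event stream such as
#
#   ['startTest', 'setUp', 'test', 'tearDown', 'stopTest']
#
# which the call-order tests below compare against expected sequences.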
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest'))
,(Test.Foo('test1'), Test.Bar('test1'))
,(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise TypeError()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addError', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addError',
'tearDown', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'addFailure', 'tearDown',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'addFailure',
'tearDown', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
    # that does not support startTestRun and stopTestRun."
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), basestring)
# "If result is omitted or None, a temporary result object is created
# and used, but is not made available to the caller. As TestCase owns the
    # temporary result, startTestRun and stopTestRun are called."
def test_run__uses_defaultTestResult(self):
events = []
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return LoggingResult(events)
# Make run() find a result object on its own
Foo('test').run()
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
        returned for use in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertNotEqual(s1, s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) is type(b) is SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance but since it's a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with test_support.check_warnings(("", UnicodeWarning)):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': u'\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = u'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = u'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertItemsEqual(self):
a = object()
self.assertItemsEqual([1, 2, 3], [3, 2, 1])
self.assertItemsEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertItemsEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertItemsEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertItemsEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertItemsEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertItemsEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertItemsEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogeneous non-hashable sequences
self.assertItemsEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertItemsEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertItemsEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertItemsEqual(a, b)
# test utility functions supporting assertItemsEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try Unicode
self.assertGreater(u'bug', u'ant')
self.assertGreaterEqual(u'bug', u'ant')
self.assertGreaterEqual(u'ant', u'ant')
self.assertLess(u'ant', u'bug')
self.assertLessEqual(u'ant', u'bug')
self.assertLessEqual(u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
u'bug')
self.assertRaises(self.failureException, self.assertLess, u'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', u'ant')
# Try Mixed String/Unicode
self.assertGreater('bug', u'ant')
self.assertGreater(u'bug', 'ant')
self.assertGreaterEqual('bug', u'ant')
self.assertGreaterEqual(u'bug', 'ant')
self.assertGreaterEqual('ant', u'ant')
self.assertGreaterEqual(u'ant', 'ant')
self.assertLess('ant', u'bug')
self.assertLess(u'ant', 'bug')
self.assertLessEqual('ant', u'bug')
self.assertLessEqual(u'ant', 'bug')
self.assertLessEqual('ant', u'ant')
self.assertLessEqual(u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'bug')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertGreater, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant',
u'bug')
self.assertRaises(self.failureException, self.assertGreaterEqual, u'ant',
'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', u'ant')
self.assertRaises(self.failureException, self.assertLess, u'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', u'ant')
self.assertRaises(self.failureException, self.assertLessEqual, u'bug', 'ant')
def testAssertMultiLineEqual(self):
sample_text = b"""\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = b"""\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = b"""\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
for type_changer in (lambda x: x, lambda x: x.decode('utf8')):
try:
self.assertMultiLineEqual(type_changer(sample_text),
type_changer(revised_sample_text))
            except self.failureException as e:
# need to remove the first line of the error message
error = str(e).encode('utf8').split('\n', 1)[1]
# assertMultiLineEqual is hooked up as the default for
# unicode strings - so we can't use it for this check
self.assertTrue(sample_text_error == error)
    def testAssertEqualSingleLine(self):
sample_text = u"laden swallows fly slowly"
revised_sample_text = u"unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegexpMatches(self):
self.assertRegexpMatches('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegexpMatches,
'saaas', r'aaaa')
def testAssertRaisesRegexp(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegexp(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegexp(ExceptionMock, 'expect$', Stub)
self.assertRaisesRegexp(ExceptionMock, u'expect$', Stub)
def testAssertNotRaisesRegexp(self):
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, 'x',
lambda: None)
self.assertRaisesRegexp(
self.failureException, '^Exception not raised$',
self.assertRaisesRegexp, Exception, u'x',
lambda: None)
def testAssertRaisesRegexpMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, '^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception, u'^Expected$',
Stub)
self.assertRaisesRegexp(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegexp, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testSynonymAssertMethodNames(self):
"""Test undocumented method name synonyms.
        Please do not use these method names in your own code.
This test confirms their continued existence and functionality
in order to avoid breaking existing code.
"""
self.assertNotEquals(3, 5)
self.assertEquals(3, 3)
self.assertAlmostEquals(2.0, 2.0)
self.assertNotAlmostEquals(3.0, 5.0)
self.assert_(True)
    def testPendingDeprecationMethodNames(self):
        """Test fail* methods pending deprecation; they will warn in 3.2.
Do not use these methods. They will go away in 3.3.
"""
with test_support.check_warnings():
self.failIfEqual(3, 5)
self.failUnlessEqual(3, 3)
self.failUnlessAlmostEqual(2.0, 2.0)
self.failIfAlmostEqual(3.0, 5.0)
self.failUnless(True)
self.failUnlessRaises(TypeError, lambda _: 3.14 + u'spam')
self.failIf(False)
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
if __name__ == '__main__':
unittest.main()
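# Illustrative sketch (added; not part of the original test file): the
# context-manager form of assertRaises exercised by testAssertRaisesExcValue
# above, reduced to a standalone case.
class ContextManagerAssertRaisesDemo(unittest.TestCase):
    def test_exception_value_is_exposed(self):
        with self.assertRaises(ValueError) as ctx:
            int('not a number')
        # The raised exception object is kept on the context manager, so
        # its message can be inspected after the with-block exits.
        self.assertIn('not a number', str(ctx.exception))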
| apache-2.0 |
perror/hackingweek | hackingweek/templatetags/hackingweek_tags.py | 1 | 2799 | from datetime import datetime
from django import template
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.base import Variable, VariableDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from hackingweek.models import Challenge, Team, Validation
from hackingweek.utils import begin_date
register = template.Library()
@register.simple_tag
def user_count():
return User.objects.filter(is_active=True).exclude(is_staff=True).count()
@register.simple_tag
def team_count():
return Team.objects.all().count()
@register.simple_tag
def challenge_count(request):
if timezone.now() <= begin_date():
return 0
else:
challenge_count = Challenge.objects.all().count()
if request.user.is_anonymous():
return challenge_count
else:
try:
user_team = request.user.team_set.filter()[:1].get()
except Team.DoesNotExist:
return challenge_count
validation_count = Validation.objects.filter(team=user_team).count()
return challenge_count - validation_count
@register.simple_tag
def challenge_button_color(challenge_status, pk, has_team):
count, is_done, is_breakthrough = challenge_status[pk]
if (has_team):
if (is_done):
return "success"
else:
return "danger"
else:
return "info"
@register.simple_tag
def challenge_score(challenge_status, active_teams, pk):
count, is_done, is_breakthrough = challenge_status[pk]
if (count == 0):
return active_teams * (active_teams + 1)
elif (is_done):
if (is_breakthrough):
return active_teams * (active_teams - count + 2)
else:
return active_teams * (active_teams - count + 1)
else:
return active_teams * (active_teams - count)
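# Worked example (added for clarity; not part of the original file): with
# active_teams = 10, an unsolved challenge (count == 0) displays
# 10 * (10 + 1) = 110 points. Once count = 3 teams have solved it, the
# breakthrough (first) solver sees 10 * (10 - 3 + 2) = 90, a later solver
# sees 10 * (10 - 3 + 1) = 80, and a team that has not solved it sees
# 10 * (10 - 3) = 70.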
@register.simple_tag
def challenge_button_text(challenge_status, pk, has_team):
count, is_done, is_breakthrough = challenge_status[pk]
if (has_team):
if (is_done):
return _("Done")
else:
return _("To Do")
else:
return _("View")
@register.simple_tag
def active(request, pattern):
import re
if re.search(reverse(pattern), request):
return ' class="active"'
else:
return ''
@register.filter
def sort_lower(value, arg):
try:
return sorted(value,
key=Variable(arg).resolve,
cmp=lambda x,y: cmp(x.lower(), y.lower()))
except (TypeError, VariableDoesNotExist):
return ''
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def repeat(number):
return range(number)
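# Illustrative template usage (added; not part of the original file). The
# URL name and context variables below are hypothetical:
#   {% load hackingweek_tags %}
#   <p>{% user_count %} players in {% team_count %} teams</p>
#   <li{% active request.path 'site_home' %}>Home</li>
#   {{ teams|sort_lower:"name" }}
#   {{ challenge_status|get_item:challenge.pk }}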
| bsd-2-clause |
ZihengJiang/mxnet | example/speech-demo/config_util.py | 25 | 2990 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import os
import sys
import argparse
import mxnet as mx
import numpy as np
if sys.version_info >= (3, 0):
import configparser
else:
import ConfigParser as configparser
def parse_args():
default_cfg = configparser.ConfigParser()
default_cfg.read(os.path.join(os.path.dirname(__file__), 'default.cfg'))
parser = argparse.ArgumentParser()
parser.add_argument("--configfile", help="config file for training parameters")
# those allow us to overwrite the configs through command line
for sec in default_cfg.sections():
for name, _ in default_cfg.items(sec):
arg_name = '--%s_%s' % (sec, name)
doc = 'Overwrite %s in section [%s] of config file' % (name, sec)
parser.add_argument(arg_name, help=doc)
args = parser.parse_args()
if args.configfile is not None:
# now read the user supplied config file to overwrite some values
default_cfg.read(args.configfile)
# now overwrite config from command line options
for sec in default_cfg.sections():
for name, _ in default_cfg.items(sec):
arg_name = ('%s_%s' % (sec, name)).replace('-', '_')
if hasattr(args, arg_name) and getattr(args, arg_name) is not None:
sys.stderr.write('!! CMDLine overwriting %s.%s:\n' % (sec, name))
sys.stderr.write(" '%s' => '%s'\n" % (default_cfg.get(sec, name),
getattr(args, arg_name)))
default_cfg.set(sec, name, getattr(args, arg_name))
args.config = default_cfg
sys.stderr.write("="*80+"\n")
return args
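# Illustrative sketch (added; not part of the original file): how a config
# section maps onto the generated command-line overrides. The file names
# and option values below are hypothetical:
#
#   # default.cfg
#   [train]
#   context = gpu0
#   prefix  = timit_run
#
#   $ python train.py --configfile my.cfg --train_context gpu2
#
# parse_args() reads default.cfg, overlays my.cfg, then overwrites
# [train] context with "gpu2" from the command line (highest precedence).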
def get_checkpoint_path(args):
prefix = args.config.get('train', 'prefix')
if os.path.isabs(prefix):
return prefix
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'checkpoints', prefix))
def parse_contexts(args):
# parse context into Context objects
contexts = re.split(r'\W+', args.config.get('train', 'context'))
for i, ctx in enumerate(contexts):
if ctx[:3] == 'gpu':
contexts[i] = mx.context.gpu(int(ctx[3:]))
else:
contexts[i] = mx.context.cpu(int(ctx[3:]))
return contexts
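# Example (added for clarity; not part of the original file): with
# [train] context = "gpu0,gpu1", parse_contexts() returns
# [mx.context.gpu(0), mx.context.gpu(1)]. Note that the else branch
# expects a numbered device string such as "cpu0"; a bare "cpu" would
# fail the int() conversion.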
| apache-2.0 |
johnny-die-tulpe/sauron | sauron/metrics/MySQLMetric.py | 2 | 2536 | #! /usr/bin/env python
#
# Copyright (c) 2011 SEOmoz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import pymysql
import datetime
from sauron import logger
from sauron.metrics import Metric, MetricException
class MySQLMetric(Metric):
def __init__(self, name, host=None, user=None, passwd=None, **kwargs):
Metric.__init__(self, name, **kwargs)
self.reconfig(name, host, user, passwd)
def reconfig(self, name, host=None, user=None, passwd=None, **kwargs):
Metric.reconfig(self, name, **kwargs)
self.name = name
self.host = host
self.user = user
self.passwd = passwd
self.conn = None
self.cur = None
def __del__(self):
try:
self.cur.close()
self.conn.close()
except AttributeError:
pass
def values(self):
try:
self.conn = pymysql.connect(host=self.host, user=self.user, passwd=self.passwd)
self.cur = self.conn.cursor()
self.cur.execute('show status')
r = dict(self.cur.fetchall())
return {
'results' : {
'uptime' : (r['Uptime'] , 'Seconds'),
'queries': (r['Queries'], 'Count')
}
}
except pymysql.err.MySQLError:
raise MetricException('Failed to connect to mySQL.')
except KeyError:
raise MetricException('Could not find all keys in mySQL status')
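# Illustrative usage sketch (added; not part of the original file). The
# credentials are placeholders; values() connects lazily and raises
# MetricException on failure.
if __name__ == '__main__':
  metric = MySQLMetric('mysql', host='localhost', user='stats',
                       passwd='secret')
  print(metric.values())
  # => {'results': {'uptime': ('12345', 'Seconds'),
  #                 'queries': ('678', 'Count')}}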
| mit |
huggingface/transformers | src/transformers/models/bert_generation/configuration_bert_generation.py | 2 | 6324 | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BertGeneration model configuration """
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a
:class:`~transformers.BertGenerationPreTrainedModel`. It is used to instantiate a BertGeneration model according to
the specified arguments, defining the model architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 50358):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.BertGeneration`.
hidden_size (:obj:`int`, `optional`, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, `optional`, defaults to 4096):
Dimensionality of the "intermediate" (often called feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, use gradient checkpointing to save memory at the expense of slower backward pass.
position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`):
Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`,
:obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on
:obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.)
<https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please refer to
`Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)
<https://arxiv.org/abs/2009.13658>`__.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if ``config.is_decoder=True``.
Examples::
>>> from transformers import BertGenerationConfig, BertGenerationEncoder
>>> # Initializing a BertGeneration config
>>> configuration = BertGenerationConfig()
>>> # Initializing a model from the config
>>> model = BertGenerationEncoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
"""
model_type = "bert-generation"
def __init__(
self,
vocab_size=50358,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
bos_token_id=2,
eos_token_id=1,
gradient_checkpointing=False,
position_embedding_type="absolute",
use_cache=True,
**kwargs
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = gradient_checkpointing
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
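# Illustrative sketch (added; not part of the original file): overriding a
# few defaults to get a much smaller configuration. The sizes are arbitrary.
if __name__ == "__main__":
    small_config = BertGenerationConfig(
        hidden_size=256,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=1024,
    )
    print(small_config.hidden_size, small_config.num_attention_heads)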
| apache-2.0 |
farhaanbukhsh/sympy | sympy/physics/quantum/circuitplot.py | 58 | 12941 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import u, range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
class CircuitPlot(object):
def __init__(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
def circuit_plot(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
else:
pyplot = matplotlib.pyplot
Line2D = matplotlib.lines.Line2D
Circle = matplotlib.patches.Circle
#from matplotlib import rc
#rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
        def not_point(self, gate_idx, wire_idx):
            """Draw a NOT gate as a circle with a plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
"""Draw the circuit diagram for the circuit with nqubits.
Parameters
==========
c : circuit
The circuit to plot. Should be a product of Gate instances.
nqubits : int
The number of qubits to include in the circuit. Must be at least
            as big as the largest ``min_qubits`` of the gates.
"""
return CircuitPlot(c, nqubits, **kwargs)
def render_label(label, inits={}):
"""Slightly more flexible way to render labels.
>>> from sympy.physics.quantum.circuitplot import render_label
>>> render_label('q0')
'$|q0\\\\rangle$'
>>> render_label('q0', {'q0':'0'})
'$|q0\\\\rangle=|0\\\\rangle$'
"""
init = inits.get(label)
if init:
return r'$|%s\rangle=|%s\rangle$' % (label, init)
return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
"""Autogenerate labels for wires of quantum circuits.
Parameters
==========
n : int
number of qubits in the circuit
symbol : string
A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.
>>> from sympy.physics.quantum.circuitplot import labeller
>>> labeller(2)
['q_1', 'q_0']
>>> labeller(3,'j')
['j_2', 'j_1', 'j_0']
"""
return ['%s_%d' % (symbol,n-i-1) for i in range(n)]
class Mz(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
        gate; it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex=u('M_z')
class Mx(OneQubitGate):
"""Mock-up of an x measurement gate.
This is in circuitplot rather than gate.py because it's not a real
        gate; it just draws one.
"""
measurement = True
gate_name='Mx'
gate_name_latex=u('M_x')
class CreateOneQubitGate(ManagedProperties):
def __new__(mcl, name, latexname=None):
if not latexname:
latexname = name
return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
{'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
"""Use a lexical closure to make a controlled gate.
"""
if not latexname:
latexname = name
onequbitgate = CreateOneQubitGate(name, latexname)
def ControlledGate(ctrls,target):
return CGate(tuple(ctrls),onequbitgate(target))
return ControlledGate
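    # Illustrative sketch (added; not part of the original file): building
    # a labelled controlled gate with the factories above.
    if __name__ == '__main__':
        CV = CreateCGate('V')      # factory for a controlled "V" gate
        gate = CV([1], 0)          # control on wire 1, target on wire 0
        # circuit_plot(gate, nqubits=2, labels=labeller(2))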
| bsd-3-clause |
opbeat/opbeatcli | opbeatcli/client.py | 1 | 4139 | """
Opbeat log API client.
"""
import json
import logging
from opbeatcli import __version__
from opbeatcli.log import logger
from opbeatcli import settings
from opbeatcli.exceptions import ClientConnectionError, ClientHTTPError
from opbeatcli.compat import (Request, urlopen, URLError,
HTTPError, BaseHTTPRequestHandler)
HTTP_RESPONSE_CODES = BaseHTTPRequestHandler.responses
class OpbeatClient(object):
"""
The Opbeat client, which handles communication with the
Opbeat servers.
"""
def __init__(self, secret_token, organization_id, app_id,
server=settings.SERVER, timeout=settings.TIMEOUT,
dry_run=False):
self.server = server
self.secret_token = secret_token
self.organization_id = organization_id
self.app_id = app_id
self.timeout = timeout
self.dry_run = dry_run
self.logger = logger.getChild('client')
self.logger.info('Opbeat client configuration:')
for k in ['server', 'organization_id', 'app_id']:
self.logger.info(' %16s: %r' % (k, str(getattr(self, k))))
def log_request(self, uri, headers, payload):
self.logger.debug('> Server: %s', self.server)
self.logger.debug('> HTTP/1.1 POST %s', uri)
for header, value in headers.items():
self.logger.debug('> %s: %s', header, value)
self.logger.debug('> %s', payload)
def log_response(self, response, level=logging.DEBUG):
"""
:type response: HTTPResponse
"""
self.logger.log(level, '< HTTP %d %s',
response.code,
HTTP_RESPONSE_CODES[response.code][0])
body = response.read()
if body:
self.logger.log(level, '< %s', body)
def post(self, uri, data):
"""
HTTP POST ``data`` as JSON to collection identified by ``uri``.
:param uri:
The collection URI. It can be in the form of a URI template
with the variables {organization_id} and {app_id}, e.g.:
/api/{organization_id}/apps/{app_id}/deployments/
:param data: the data to be send
:type data: dict
"""
uri = uri.format(
organization_id=self.organization_id,
app_id=self.app_id
)
url = self.server + uri
headers = {
'User-Agent': 'opbeatcli/%s' % __version__,
'Authorization': 'Bearer %s' % self.secret_token,
'Content-Type': 'application/json',
}
payload = json.dumps(data, indent=2, sort_keys=True)
request = Request(
url=url,
headers=headers,
data=payload.encode('utf8')
)
self.log_request(uri, headers, payload)
if self.dry_run:
self.logger.info('Not sending because --dry-run.')
return
try:
response = urlopen(
request,
timeout=self.timeout,
)
# voidspace.org.uk/python/articles/urllib2.shtml#handling-exceptions
except HTTPError as e:
self.logger.error('< The server could not fulfill the request')
self.logger.debug('HTTP error', exc_info=True)
self.log_response(e, level=logging.ERROR)
raise ClientHTTPError(e.code)
except URLError as e: # Connection error.
try:
code, reason = e.reason.args
except ValueError:
code, reason = None, str(e)
if reason == 'Operation now in progress':
error_msg = 'request timed out (--timeout=%.2f)' % self.timeout
else:
error_msg = reason
self.logger.error('Unable to reach the API server: %s', error_msg)
self.logger.debug('URL error (connection error)', exc_info=0)
raise ClientConnectionError(error_msg)
except Exception:
raise # Unexpected error, not handled here.
else:
self.log_response(response, level=logging.DEBUG)
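# Illustrative usage sketch (added; not part of the original file). The
# token and IDs are placeholders; dry_run=True logs the request without
# sending it over the network.
if __name__ == '__main__':
    client = OpbeatClient(secret_token='TOKEN',
                          organization_id='ORG_ID',
                          app_id='APP_ID',
                          dry_run=True)
    client.post('/api/{organization_id}/apps/{app_id}/deployments/',
                {'rev': 'abc123'})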
| bsd-3-clause |
Tesora/tesora-tempest | tempest/api/identity/admin/v3/test_projects.py | 2 | 9663 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
@test.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
def test_project_create_with_description(self):
# Create project with a description
project_name = data_utils.rand_name('project')
project_desc = data_utils.rand_name('desc')
project = self.projects_client.create_project(
project_name, description=project_desc)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
project_id = project['id']
desc1 = project['description']
self.assertEqual(desc1, project_desc, 'Description should have '
'been sent in response for create')
body = self.projects_client.show_project(project_id)['project']
desc2 = body['description']
self.assertEqual(desc2, project_desc, 'Description does not appear'
'to be set')
@test.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
def test_project_create_with_domain(self):
# Create project with a domain
domain = self.setup_test_domain()
project_name = data_utils.rand_name('project')
project = self.projects_client.create_project(
project_name, domain_id=domain['id'])['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
project_id = project['id']
self.assertEqual(project_name, project['name'])
self.assertEqual(domain['id'], project['domain_id'])
body = self.projects_client.show_project(project_id)['project']
self.assertEqual(project_name, body['name'])
self.assertEqual(domain['id'], body['domain_id'])
@testtools.skipUnless(CONF.identity_feature_enabled.reseller,
'Reseller not available.')
@test.idempotent_id('1854f9c0-70bc-4d11-a08a-1c789d339e3d')
def test_project_create_with_parent(self):
# Create root project without providing a parent_id
domain = self.setup_test_domain()
domain_id = domain['id']
root_project_name = data_utils.rand_name('root_project')
root_project = self.projects_client.create_project(
root_project_name, domain_id=domain_id)['project']
self.addCleanup(
self.projects_client.delete_project, root_project['id'])
root_project_id = root_project['id']
parent_id = root_project['parent_id']
self.assertEqual(root_project_name, root_project['name'])
# If not provided, the parent_id must point to the top level
# project in the hierarchy, i.e. its domain
self.assertEqual(domain_id, parent_id)
# Create a project using root_project_id as parent_id
project_name = data_utils.rand_name('project')
project = self.projects_client.create_project(
project_name, domain_id=domain_id,
parent_id=root_project_id)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
parent_id = project['parent_id']
self.assertEqual(project_name, project['name'])
self.assertEqual(root_project_id, parent_id)
@test.idempotent_id('1f66dc76-50cc-4741-a200-af984509e480')
def test_project_create_enabled(self):
# Create a project that is enabled
project_name = data_utils.rand_name('project')
project = self.projects_client.create_project(
project_name, enabled=True)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
project_id = project['id']
en1 = project['enabled']
self.assertTrue(en1, 'Enable should be True in response')
body = self.projects_client.show_project(project_id)['project']
en2 = body['enabled']
self.assertTrue(en2, 'Enable should be True in lookup')
@test.idempotent_id('78f96a9c-e0e0-4ee6-a3ba-fbf6dfd03207')
def test_project_create_not_enabled(self):
# Create a project that is not enabled
project_name = data_utils.rand_name('project')
project = self.projects_client.create_project(
project_name, enabled=False)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
en1 = project['enabled']
self.assertEqual('false', str(en1).lower(),
'Enable should be False in response')
body = self.projects_client.show_project(project['id'])['project']
en2 = body['enabled']
self.assertEqual('false', str(en2).lower(),
'Enable should be False in lookup')
@test.idempotent_id('f608f368-048c-496b-ad63-d286c26dab6b')
def test_project_update_name(self):
# Update name attribute of a project
p_name1 = data_utils.rand_name('project')
project = self.projects_client.create_project(p_name1)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
resp1_name = project['name']
p_name2 = data_utils.rand_name('project2')
body = self.projects_client.update_project(project['id'],
name=p_name2)['project']
resp2_name = body['name']
self.assertNotEqual(resp1_name, resp2_name)
body = self.projects_client.show_project(project['id'])['project']
resp3_name = body['name']
self.assertNotEqual(resp1_name, resp3_name)
self.assertEqual(p_name1, resp1_name)
self.assertEqual(resp2_name, resp3_name)
@test.idempotent_id('f138b715-255e-4a7d-871d-351e1ef2e153')
def test_project_update_desc(self):
# Update description attribute of a project
p_name = data_utils.rand_name('project')
p_desc = data_utils.rand_name('desc')
project = self.projects_client.create_project(
p_name, description=p_desc)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
resp1_desc = project['description']
p_desc2 = data_utils.rand_name('desc2')
body = self.projects_client.update_project(
project['id'], description=p_desc2)['project']
resp2_desc = body['description']
self.assertNotEqual(resp1_desc, resp2_desc)
body = self.projects_client.show_project(project['id'])['project']
resp3_desc = body['description']
self.assertNotEqual(resp1_desc, resp3_desc)
self.assertEqual(p_desc, resp1_desc)
self.assertEqual(resp2_desc, resp3_desc)
@test.idempotent_id('b6b25683-c97f-474d-a595-55d410b68100')
def test_project_update_enable(self):
# Update the enabled attribute of a project
p_name = data_utils.rand_name('project')
p_en = False
project = self.projects_client.create_project(p_name,
enabled=p_en)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
resp1_en = project['enabled']
p_en2 = True
body = self.projects_client.update_project(project['id'],
enabled=p_en2)['project']
resp2_en = body['enabled']
self.assertNotEqual(resp1_en, resp2_en)
body = self.projects_client.show_project(project['id'])['project']
resp3_en = body['enabled']
self.assertNotEqual(resp1_en, resp3_en)
self.assertEqual('false', str(resp1_en).lower())
self.assertEqual(resp2_en, resp3_en)
@test.idempotent_id('59398d4a-5dc5-4f86-9a4c-c26cc804d6c6')
def test_associate_user_to_project(self):
# Associate a user to a project
# Create a Project
p_name = data_utils.rand_name('project')
project = self.projects_client.create_project(p_name)['project']
self.addCleanup(self.projects_client.delete_project, project['id'])
# Create a User
u_name = data_utils.rand_name('user')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = data_utils.rand_password()
user = self.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=project['id'])['user']
# Delete the User at the end of this method
self.addCleanup(self.users_client.delete_user, user['id'])
# Get User To validate the user details
new_user_get = self.users_client.show_user(user['id'])['user']
# Assert response body of GET
self.assertEqual(u_name, new_user_get['name'])
self.assertEqual(u_desc, new_user_get['description'])
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
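# Note (added for clarity; not part of the original file): the tests above
# all follow the same create-then-addCleanup pattern, e.g.
#   project = self.projects_client.create_project(p_name)['project']
#   self.addCleanup(self.projects_client.delete_project, project['id'])
# addCleanup callbacks run in LIFO order after tearDown, so each project
# and user is removed even when a later assertion fails.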
| apache-2.0 |
rduivenvoorde/QGIS | python/plugins/db_manager/db_plugins/postgis/plugin.py | 25 | 17115 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import map
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import PostGisDBConnector
from qgis.PyQt.QtCore import Qt, QRegExp, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import Qgis, QgsApplication, QgsSettings
from qgis.gui import QgsMessageBar
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, Database, Schema, Table, VectorTable, RasterTable, \
TableField, TableConstraint, TableIndex, TableTrigger, TableRule
import re
def classFactory():
return PostGisDBPlugin
class PostGisDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconPostgis.svg")
@classmethod
def typeName(self):
return 'postgis'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'PostGIS')
@classmethod
def providerName(self):
return 'postgres'
@classmethod
def connectionSettingsKey(self):
return '/PostgreSQL/connections'
def databasesFactory(self, connection, uri):
return PGDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/%s/%s" % (self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(self.tr('There is no defined database connection "{0}".').format(conn_name))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["service", "host", "port", "database", "username", "password", "authcfg"]
service, host, port, database, username, password, authcfg = [settings.value(x, "", type=str) for x in settingsList]
useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
try:
sslmode = settings.enumValue("sslmode", QgsDataSourceUri.SslPrefer)
except TypeError:
sslmode = QgsDataSourceUri.SslPrefer
settings.endGroup()
if hasattr(authcfg, 'isNull') and authcfg.isNull():
authcfg = ''
if service:
uri.setConnection(service, database, username, password, sslmode, authcfg)
else:
uri.setConnection(host, port, database, username, password, sslmode, authcfg)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
try:
return self.connectToUri(uri)
except ConnectionError:
return False
class PGDatabase(Database):
def __init__(self, connection, uri):
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return PostGisDBConnector(uri, self.connection())
def dataTablesFactory(self, row, db, schema=None):
return PGTable(row, db, schema)
def info(self):
from .info_model import PGDatabaseInfo
return PGDatabaseInfo(self)
def vectorTablesFactory(self, row, db, schema=None):
return PGVectorTable(row, db, schema)
def rasterTablesFactory(self, row, db, schema=None):
return PGRasterTable(row, db, schema)
def schemasFactory(self, row, db):
return PGSchema(row, db)
def sqlResultModel(self, sql, parent):
from .data_model import PGSqlResultModel
return PGSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import PGSqlResultModelAsync
return PGSqlResultModelAsync(self, sql, parent)
def registerDatabaseActions(self, mainWindow):
Database.registerDatabaseActions(self, mainWindow)
# add a separator
separator = QAction(self)
separator.setSeparator(True)
mainWindow.registerAction(separator, self.tr("&Table"))
action = QAction(self.tr("Run &Vacuum Analyze"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runVacuumAnalyzeActionSlot)
action = QAction(self.tr("Run &Refresh Materialized View"), self)
mainWindow.registerAction(action, self.tr("&Table"), self.runRefreshMaterializedViewSlot)
def runVacuumAnalyzeActionSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, Table) or item.isView:
parent.infoBar.pushMessage(self.tr("Select a table for vacuum analyze."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runVacuumAnalyze()
def runRefreshMaterializedViewSlot(self, item, action, parent):
QApplication.restoreOverrideCursor()
try:
if not isinstance(item, PGTable) or item._relationType != 'm':
parent.infoBar.pushMessage(self.tr("Select a materialized view for refresh."), Qgis.Info,
parent.iface.messageTimeout())
return
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
item.runRefreshMaterializedView()
def hasLowercaseFieldNamesOption(self):
return True
def supportsComment(self):
return True
def executeSql(self, sql):
return self.connector._executeSql(sql)
class PGSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
self.oid, self.name, self.owner, self.perms, self.comment = row
class PGTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, schema_name, self._relationType, self.owner, self.estimatedRowCount, self.pages, self.comment = row
self.isView = self._relationType in set(['v', 'm'])
self.estimatedRowCount = int(self.estimatedRowCount)
def runVacuumAnalyze(self):
self.aboutToChange.emit()
self.database().connector.runVacuumAnalyze((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runRefreshMaterializedView(self):
self.aboutToChange.emit()
self.database().connector.runRefreshMaterializedView((self.schemaName(), self.name))
# TODO: change only this item, not re-create all the tables in the schema/database
self.schema().refresh() if self.schema() else self.database().refresh()
def runAction(self, action):
action = str(action)
if action.startswith("vacuumanalyze/"):
if action == "vacuumanalyze/run":
self.runVacuumAnalyze()
return True
elif action.startswith("rule/"):
parts = action.split('/')
rule_name = parts[1]
rule_action = parts[2]
msg = self.tr(u"Do you want to {0} rule {1}?").format(rule_action, rule_name)
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(None, self.tr("Table rule"), msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if rule_action == "delete":
self.aboutToChange.emit()
self.database().connector.deleteTableRule(rule_name, (self.schemaName(), self.name))
self.refreshRules()
return True
elif action.startswith("refreshmaterializedview/"):
if action == "refreshmaterializedview/run":
self.runRefreshMaterializedView()
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return PGTableField(row, table)
def tableConstraintsFactory(self, row, table):
return PGTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return PGTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return PGTableTrigger(row, table)
def tableRulesFactory(self, row, table):
return PGTableRule(row, table)
def info(self):
from .info_model import PGTableInfo
return PGTableInfo(self)
def crs(self):
return self.database().connector.getCrs(self.srid)
def tableDataModel(self, parent):
from .data_model import PGTableDataModel
return PGTableDataModel(self, parent)
def delete(self):
self.aboutToChange.emit()
if self.isView:
ret = self.database().connector.deleteView((self.schemaName(), self.name), self._relationType == 'm')
else:
ret = self.database().connector.deleteTable((self.schemaName(), self.name))
if not ret:
self.deleted.emit()
return ret
class PGVectorTable(PGTable, VectorTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-4], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.geomDim, self.srid = row[-4:]
def info(self):
from .info_model import PGVectorTableInfo
return PGVectorTableInfo(self)
def runAction(self, action):
if PGTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
class PGRasterTable(PGTable, RasterTable):
def __init__(self, row, db, schema=None):
PGTable.__init__(self, row[:-6], db, schema)
RasterTable.__init__(self, db, schema)
self.geomColumn, self.pixelType, self.pixelSizeX, self.pixelSizeY, self.isExternal, self.srid = row[-6:]
self.geomType = 'RASTER'
def info(self):
from .info_model import PGRasterTableInfo
return PGRasterTableInfo(self)
def uri(self, uri=None):
"""Returns the datasource URI for postgresraster provider"""
if not uri:
uri = self.database().uri()
service = (u'service=\'%s\'' % uri.service()) if uri.service() else ''
dbname = (u'dbname=\'%s\'' % uri.database()) if uri.database() else ''
host = (u'host=%s' % uri.host()) if uri.host() else ''
user = (u'user=%s' % uri.username()) if uri.username() else ''
passw = (u'password=%s' % uri.password()) if uri.password() else ''
port = (u'port=%s' % uri.port()) if uri.port() else ''
schema = self.schemaName() if self.schemaName() else 'public'
table = '"%s"."%s"' % (schema, self.name)
if not dbname:
# postgresraster provider *requires* a dbname
connector = self.database().connector
r = connector._execute(None, "SELECT current_database()")
dbname = (u'dbname=\'%s\'' % connector._fetchone(r)[0])
connector._close_cursor(r)
# Find first raster field
col = ''
for fld in self.fields():
if fld.dataType == "raster":
col = u'column=\'%s\'' % fld.name
break
uri = u'%s %s %s %s %s %s %s table=%s' % \
(service, dbname, host, user, passw, port, col, table)
return uri
def mimeUri(self):
uri = u"raster:postgresraster:{}:{}".format(self.name, re.sub(":", r"\:", self.uri()))
return uri
def toMapLayer(self):
from qgis.core import QgsRasterLayer, QgsContrastEnhancement, QgsDataSourceUri, QgsCredentials
rl = QgsRasterLayer(self.uri(), self.name, "postgresraster")
if not rl.isValid():
err = rl.error().summary()
uri = QgsDataSourceUri(self.database().uri())
conninfo = uri.connectionInfo(False)
username = uri.username()
password = uri.password()
for i in range(3):
(ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
if ok:
uri.setUsername(username)
uri.setPassword(password)
                    rl = QgsRasterLayer(self.uri(uri), self.name, "postgresraster")
if rl.isValid():
break
if rl.isValid():
rl.setContrastEnhancement(QgsContrastEnhancement.StretchToMinimumMaximum)
return rl
class PGTableField(TableField):
def __init__(self, row, table):
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, self.modifier, self.notNull, self.hasDefault, self.default, typeStr = row
self.primaryKey = False
# get modifier (e.g. "precision,scale") from formatted type string
trimmedTypeStr = typeStr.strip()
regex = QRegExp("\\((.+)\\)$")
startpos = regex.indexIn(trimmedTypeStr)
if startpos >= 0:
self.modifier = regex.cap(1).strip()
else:
self.modifier = None
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == TableConstraint.TypePrimaryKey and self.num in con.columns:
self.primaryKey = True
break
def getComment(self):
"""Returns the comment for a field"""
tab = self.table()
# SQL Query checking if a comment exists for the field
sql_cpt = "Select count(*) from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
# SQL Query that return the comment of the field
sql = "Select pd.description from pg_description pd, pg_class pc, pg_attribute pa where relname = '%s' and attname = '%s' and pa.attrelid = pc.oid and pd.objoid = pc.oid and pd.objsubid = pa.attnum" % (tab.name, self.name)
c = tab.database().connector._execute(None, sql_cpt) # Execute Check query
        res = tab.database().connector._fetchone(c)[0]  # Store result
        tab.database().connector._close_cursor(c)  # Close the check cursor
if res == 1:
# When a comment exists
c = tab.database().connector._execute(None, sql) # Execute query
res = tab.database().connector._fetchone(c)[0] # Store result
tab.database().connector._close_cursor(c) # Close cursor
return res # Return comment
else:
return ''
class PGTableConstraint(TableConstraint):
def __init__(self, row, table):
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.isDefferable, self.isDeffered, columns = row[:5]
self.columns = list(map(int, columns.split(' ')))
if constr_type_str in TableConstraint.types:
self.type = TableConstraint.types[constr_type_str]
else:
self.type = TableConstraint.TypeUnknown
if self.type == TableConstraint.TypeCheck:
self.checkSource = row[5]
elif self.type == TableConstraint.TypeForeignKey:
self.foreignTable = row[6]
self.foreignOnUpdate = TableConstraint.onAction[row[7]]
self.foreignOnDelete = TableConstraint.onAction[row[8]]
self.foreignMatchType = TableConstraint.matchTypes[row[9]]
self.foreignKeys = row[10]
class PGTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, columns, self.isUnique = row
self.columns = list(map(int, columns.split(' ')))
class PGTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.function, self.type, self.enabled = row
class PGTableRule(TableRule):
def __init__(self, row, table):
TableRule.__init__(self, table)
self.name, self.definition = row
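# Illustrative example (added; not part of the original file): a typical
# datasource string assembled by PGRasterTable.uri() above; every value is
# a placeholder.
#   dbname='gis' host=localhost user=gis_ro password=secret port=5432
#   column='rast' table="public"."dem"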
| gpl-2.0 |
usersource/tasks | tasks_phonegap/Tasks/plugins/io.usersource.anno/tools/copytool2/apiclient/http.py | 102 | 52847 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import logging
import mimeparse
import mimetypes
import os
import random
import sys
import time
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import InvalidChunkSizeError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client import util
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
MAX_URI_LENGTH = 2048
class MediaUploadProgress(object):
"""Status of a resumable upload."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes sent so far.
total_size: int, total bytes in complete upload, or None if the total
upload size isn't known ahead of time.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
  def progress(self):
    """Fraction of the upload completed, as a float.
    Returns:
      the fraction complete as a float in [0.0, 1.0], returning 0.0 if the
      total size of the upload is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
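# Worked example (added for clarity; not part of the original file):
#   MediaUploadProgress(2 * 1024 * 1024, 8 * 1024 * 1024).progress()
# returns 0.25, a fraction in [0.0, 1.0] rather than a percentage; with
# total_size=None it returns 0.0.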
class MediaDownloadProgress(object):
"""Status of a resumable download."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes received so far.
total_size: int, total bytes in complete download.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
  def progress(self):
    """Fraction of the download completed, as a float.
    Returns:
      the fraction complete as a float in [0.0, 1.0], returning 0.0 if the
      total size of the download is unknown.
"""
if self.total_size is not None:
return float(self.resumable_progress) / float(self.total_size)
else:
return 0.0
class MediaUpload(object):
"""Describes a media object to upload.
Base class that defines the interface of MediaUpload subclasses.
Note that subclasses of MediaUpload may allow you to control the chunksize
when uploading a media object. It is important to keep the size of the chunk
as large as possible to keep the upload efficient. Other factors may influence
the size of the chunk you use, particularly if you are working in an
environment where individual HTTP requests may have a hardcoded time limit,
such as under certain classes of requests under Google App Engine.
Streams are io.Base compatible objects that support seek(). Some MediaUpload
subclasses support using streams directly to upload data. Support for
streaming may be indicated by a MediaUpload sub-class and if appropriate for a
platform that stream will be used for uploading the media object. The support
for streaming is indicated by has_stream() returning True. The stream() method
should return an io.Base object that supports seek(). On platforms where the
underlying httplib module supports streaming, for example Python 2.6 and
later, the stream will be passed into the http library which will result in
less memory being used and possibly faster uploads.
If you need to upload media that can't be uploaded using any of the existing
MediaUpload sub-class then you can sub-class MediaUpload for your particular
needs.
"""
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
raise NotImplementedError()
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return 'application/octet-stream'
def size(self):
"""Size of upload.
Returns:
Size of the body, or None of the size is unknown.
"""
return None
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return False
  def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
raise NotImplementedError()
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.Base
subclass.
"""
return False
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
raise NotImplementedError()
@util.positional(1)
def _to_json(self, strip=None):
"""Utility function for creating a JSON representation of a MediaUpload.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
if strip is not None:
for member in strip:
del d[member]
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Create a JSON representation of an instance of MediaUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json()
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a MediaUpload subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of MediaUpload that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
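# Hedged sketch (not part of the library): the MediaUpload docstring above
# invites subclassing for custom sources. This minimal subclass serves bytes
# from an in-memory buffer; the class name and behaviour are assumptions
# made for illustration only.
class _ExampleBytesUpload(MediaUpload):
  """A minimal MediaUpload over a bytes buffer, for illustration."""

  def __init__(self, data, mimetype='application/octet-stream'):
    super(_ExampleBytesUpload, self).__init__()
    self._data = data
    self._mimetype = mimetype

  def chunksize(self):
    # Upload everything in one chunk; real subclasses may pick a fixed size.
    return len(self._data)

  def mimetype(self):
    return self._mimetype

  def size(self):
    return len(self._data)

  def getbytes(self, begin, length):
    # Slicing past the end yields a short read, matching the documented
    # contract of MediaUpload.getbytes().
    return self._data[begin:begin + length]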
class MediaIoBaseUpload(MediaUpload):
"""A MediaUpload for a io.Base objects.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
fh = io.BytesIO('...Some data to upload...')
media = MediaIoBaseUpload(fh, mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(3)
def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
fd: io.IOBase or file object, The source of the bytes to upload. MUST be
opened in blocking mode; do not use streams opened in non-blocking mode.
The given stream must be seekable, that is, it must be able to call
seek() on fd.
mimetype: string, Mime-type of the file.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded as a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
super(MediaIoBaseUpload, self).__init__()
self._fd = fd
self._mimetype = mimetype
if not (chunksize == -1 or chunksize > 0):
raise InvalidChunkSizeError()
self._chunksize = chunksize
self._resumable = resumable
self._fd.seek(0, os.SEEK_END)
self._size = self._fd.tell()
def chunksize(self):
"""Chunk size for resumable uploads.
Returns:
Chunk size in bytes.
"""
return self._chunksize
def mimetype(self):
"""Mime type of the body.
Returns:
Mime type.
"""
return self._mimetype
def size(self):
"""Size of upload.
Returns:
Size of the body, or None if the size is unknown.
"""
return self._size
def resumable(self):
"""Whether this upload is resumable.
Returns:
True if resumable upload or False.
"""
return self._resumable
def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
self._fd.seek(begin)
return self._fd.read(length)
def has_stream(self):
"""Does the underlying upload support a streaming interface.
Streaming means it is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
Returns:
True if the call to stream() will return an instance of a seekable io.IOBase
subclass.
"""
return True
def stream(self):
"""A stream interface to the data being uploaded.
Returns:
The returned value is an io.IOBase subclass that supports seek, i.e.
seekable() returns True.
"""
return self._fd
def to_json(self):
"""This upload type is not serializable."""
raise NotImplementedError('MediaIoBaseUpload is not serializable.')
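# Hedged usage sketch: uploading an in-memory image with the class above.
# 'service' and its files() collection are hypothetical discovery-built
# objects; only the MediaIoBaseUpload call reflects this module.
def _example_io_base_upload(service):
  import io
  fh = io.BytesIO(b'...png bytes...')
  media = MediaIoBaseUpload(fh, mimetype='image/png',
                            chunksize=1024 * 1024, resumable=True)
  return service.files().insert(media_body=media).execute()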
class MediaFileUpload(MediaIoBaseUpload):
"""A MediaUpload for a file.
Construct a MediaFileUpload and pass as the media_body parameter of the
method. For example, if we had a service that allowed uploading images:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1024*1024, resumable=True)
farm.animals().insert(
id='cow',
name='cow.png',
media_body=media).execute()
Depending on the platform you are working on, you may pass -1 as the
chunksize, which indicates that the entire file should be uploaded in a single
request. If the underlying platform supports streams, such as Python 2.6 or
later, then this can be very efficient as it avoids multiple connections, and
also avoids loading the entire file into memory before sending it. Note that
Google App Engine has a 5MB limit on request size, so you should never set
your chunksize larger than 5MB, or to -1.
"""
@util.positional(2)
def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
resumable=False):
"""Constructor.
Args:
filename: string, Name of the file.
mimetype: string, Mime-type of the file. If None then a mime-type will be
guessed from the file extension.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded in a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunksize larger than 5MB,
or to -1.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
self._filename = filename
fd = open(self._filename, 'rb')
if mimetype is None:
(mimetype, encoding) = mimetypes.guess_type(filename)
super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
def to_json(self):
"""Creating a JSON representation of an instance of MediaFileUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(strip=['_fd'])
@staticmethod
def from_json(s):
d = simplejson.loads(s)
return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
chunksize=d['_chunksize'], resumable=d['_resumable'])
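# Hedged sketch: MediaFileUpload survives a to_json()/from_json() round
# trip, which is how a resumable upload can be persisted across processes.
# 'cow.png' is a placeholder filename that must exist when this runs.
def _example_file_upload_roundtrip():
  upload = MediaFileUpload('cow.png', mimetype='image/png',
                           chunksize=1024 * 1024, resumable=True)
  restored = MediaFileUpload.from_json(upload.to_json())
  return restored.mimetype(), restored.size()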
class MediaInMemoryUpload(MediaIoBaseUpload):
"""MediaUpload for a chunk of bytes.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
"""
@util.positional(2)
def __init__(self, body, mimetype='application/octet-stream',
chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
"""Create a new MediaInMemoryUpload.
DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
the stream.
Args:
body: string, Bytes of body content.
mimetype: string, Mime-type of the file or default of
'application/octet-stream'.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True.
resumable: bool, True if this is a resumable upload. False means upload
in a single request.
"""
fd = StringIO.StringIO(body)
super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
resumable=resumable)
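# Hedged migration sketch for the deprecation notice above: the same body
# can be passed through MediaIoBaseUpload with a StringIO stream, which is
# the replacement the docstring recommends.
def _example_in_memory_migration(body):
  fd = StringIO.StringIO(body)
  return MediaIoBaseUpload(fd, 'application/octet-stream',
                           chunksize=DEFAULT_CHUNK_SIZE, resumable=False)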
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
@util.positional(3)
def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fd: io.IOBase or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self._fd = fd
self._request = request
self._uri = request.uri
self._chunksize = chunksize
self._progress = 0
self._total_size = None
self._done = False
# Stubs for testing.
self._sleep = time.sleep
self._rand = random.random
@util.positional(1)
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
# The HTTP Range header is inclusive of both endpoints, so subtract one
# to request exactly chunksize bytes per call.
headers = {
    'range': 'bytes=%d-%d' % (
        self._progress, self._progress + self._chunksize - 1)
}
http = self._request.http
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media download: GET %s, following status: %d'
% (retry_num, self._uri, resp.status))
resp, content = http.request(self._uri, headers=headers)
if resp.status < 500:
break
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
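# Hedged sketch expanding the class docstring above: a complete download
# loop that also opts into next_chunk()'s retry support. 'request' is any
# media HttpRequest; the output filename is a placeholder.
def _example_download(request):
  import io
  fh = io.FileIO('download.bin', mode='wb')
  downloader = MediaIoBaseDownload(fh, request, chunksize=1024 * 1024)
  done = False
  while not done:
    status, done = downloader.next_chunk(num_retries=3)
    if status:
      print 'Download %d%%.' % int(status.progress() * 100)
  fh.close()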
class _StreamSlice(object):
"""Truncated stream.
Takes a stream and presents a stream that is a slice of the original stream.
This is used when uploading media in chunks. In later versions of Python a
stream can be passed to httplib in place of the string of data to send. The
problem is that httplib just blindly reads to the end of the stream. This
wrapper presents a virtual stream that only reads to the end of the chunk.
"""
def __init__(self, stream, begin, chunksize):
"""Constructor.
Args:
stream: (io.IOBase, file object), the stream to wrap.
begin: int, the seek position the chunk begins at.
chunksize: int, the size of the chunk.
"""
self._stream = stream
self._begin = begin
self._chunksize = chunksize
self._stream.seek(begin)
def read(self, n=-1):
"""Read n bytes.
Args:
n: int, the number of bytes to read.
Returns:
A string of length 'n', or less if EOF is reached.
"""
# The data left available to read sits in [cur, end)
cur = self._stream.tell()
end = self._begin + self._chunksize
if n == -1 or cur + n > end:
n = end - cur
return self._stream.read(n)
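# Hedged demonstration of the wrapper above: reads never run past the end
# of the slice, even though the underlying stream holds more bytes.
def _example_stream_slice():
  import io
  stream = io.BytesIO(b'0123456789')
  piece = _StreamSlice(stream, 2, 4)  # exposes bytes 2..5 only
  return piece.read()  # b'2345'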
class HttpRequest(object):
"""Encapsulates a single HTTP request."""
@util.positional(4)
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request,
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
resumable: MediaUpload, None if this is not a resumable request.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.methodId = methodId
self.http = http
self.postproc = postproc
self.resumable = resumable
self.response_callbacks = []
self._in_error_state = False
# Pull the multipart boundary out of the content-type header.
major, minor, params = mimeparse.parse_mime_type(
headers.get('content-type', 'application/json'))
# The size of the non-media part of the request.
self.body_size = len(self.body or '')
# The resumable URI to send chunks to.
self.resumable_uri = None
# The bytes that have been uploaded.
self.resumable_progress = 0
# Stubs for testing.
self._rand = random.random
self._sleep = time.sleep
@util.positional(1)
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse.urlparse(self.uri)
self.uri = urlparse.urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning('Retry #%d for request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(str(self.uri), method=str(self.method),
body=self.body, headers=self.headers)
if resp.status < 500:
break
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)
@util.positional(2)
def add_response_callback(self, cb):
"""add_response_headers_callback
Args:
cb: Callback to be called on receiving the response headers, of signature:
def cb(resp):
# Where resp is an instance of httplib2.Response
"""
self.response_callbacks.append(cb)
@util.positional(1)
def next_chunk(self, http=None, num_retries=0):
"""Execute the next step of a resumable upload.
Can only be used if the method being executed supports media uploads and
the MediaUpload object passed in was flagged as using resumable upload.
Example:
media = MediaFileUpload('cow.png', mimetype='image/png',
chunksize=1000, resumable=True)
request = farm.animals().insert(
id='cow',
name='cow.png',
media_body=media)
response = None
while response is None:
status, response = request.next_chunk()
if status:
print "Upload %d%% complete." % int(status.progress() * 100)
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable.size() is None:
size = '*'
else:
size = str(self.resumable.size())
if self.resumable_uri is None:
start_headers = copy.copy(self.headers)
start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
if size != '*':
start_headers['X-Upload-Content-Length'] = size
start_headers['content-length'] = str(self.body_size)
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for resumable URI request: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
resp, content = http.request(self.uri, method=self.method,
body=self.body,
headers=start_headers)
if resp.status < 500:
break
if resp.status == 200 and 'location' in resp:
self.resumable_uri = resp['location']
else:
raise ResumableUploadError(resp, content)
elif self._in_error_state:
# If we are in an error state then query the server for current state of
# the upload by sending an empty PUT and reading the 'range' header in
# the response.
headers = {
'Content-Range': 'bytes */%s' % size,
'content-length': '0'
}
resp, content = http.request(self.resumable_uri, 'PUT',
headers=headers)
status, body = self._process_response(resp, content)
if body:
# The upload was complete.
return (status, body)
# The httplib.request method can take streams for the body parameter, but
# only in Python 2.6 or later. If a stream is available under those
# conditions then use it as the body argument.
if self.resumable.has_stream() and sys.version_info[1] >= 6:
data = self.resumable.stream()
if self.resumable.chunksize() == -1:
data.seek(self.resumable_progress)
chunk_end = self.resumable.size() - self.resumable_progress - 1
else:
# Doing chunking with a stream, so wrap a slice of the stream.
data = _StreamSlice(data, self.resumable_progress,
self.resumable.chunksize())
chunk_end = min(
self.resumable_progress + self.resumable.chunksize() - 1,
self.resumable.size() - 1)
else:
data = self.resumable.getbytes(
self.resumable_progress, self.resumable.chunksize())
# A short read implies that we are at EOF, so finish the upload.
if len(data) < self.resumable.chunksize():
size = str(self.resumable_progress + len(data))
chunk_end = self.resumable_progress + len(data) - 1
headers = {
'Content-Range': 'bytes %d-%d/%s' % (
self.resumable_progress, chunk_end, size),
# Must set the content-length header here because httplib can't
# calculate the size when working with _StreamSlice.
'Content-Length': str(chunk_end - self.resumable_progress + 1)
}
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media upload: %s %s, following status: %d'
% (retry_num, self.method, self.uri, resp.status))
try:
resp, content = http.request(self.resumable_uri, method='PUT',
body=data,
headers=headers)
except:
self._in_error_state = True
raise
if resp.status < 500:
break
return self._process_response(resp, content)
def _process_response(self, resp, content):
"""Process the response from a single chunk upload.
Args:
resp: httplib2.Response, the response object.
content: string, the content of the response.
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx or a 308.
"""
if resp.status in [200, 201]:
self._in_error_state = False
return None, self.postproc(resp, content)
elif resp.status == 308:
self._in_error_state = False
# A "308 Resume Incomplete" indicates we are not done.
self.resumable_progress = int(resp['range'].split('-')[1]) + 1
if 'location' in resp:
self.resumable_uri = resp['location']
else:
self._in_error_state = True
raise HttpError(resp, content, uri=self.uri)
return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
None)
def to_json(self):
"""Returns a JSON representation of the HttpRequest."""
d = copy.copy(self.__dict__)
if d['resumable'] is not None:
d['resumable'] = self.resumable.to_json()
del d['http']
del d['postproc']
del d['_sleep']
del d['_rand']
return simplejson.dumps(d)
@staticmethod
def from_json(s, http, postproc):
"""Returns an HttpRequest populated with info from a JSON object."""
d = simplejson.loads(s)
if d['resumable'] is not None:
d['resumable'] = MediaUpload.new_from_json(d['resumable'])
return HttpRequest(
http,
postproc,
uri=d['uri'],
method=d['method'],
body=d['body'],
headers=d['headers'],
methodId=d['methodId'],
resumable=d['resumable'])
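# Hedged sketch: HttpRequest objects are normally produced by the discovery
# machinery, but one can be built directly. The URI is a placeholder and
# the postproc here simply hands back the raw response content.
def _example_manual_request(http):
  postproc = lambda resp, content: content
  request = HttpRequest(http, postproc, 'https://www.example.com/resource',
                        method='GET', headers={})
  return request.execute(num_retries=2)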
class BatchHttpRequest(object):
"""Batches multiple HttpRequest objects into a single HTTP request.
Example:
from apiclient.http import BatchHttpRequest
def list_animals(request_id, response, exception):
\"\"\"Do something with the animals list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
def list_farmers(request_id, response, exception):
\"\"\"Do something with the farmers list response.\"\"\"
if exception is not None:
# Do something with the exception.
pass
else:
# Do something with the response.
pass
service = build('farm', 'v2')
batch = BatchHttpRequest()
batch.add(service.animals().list(), list_animals)
batch.add(service.farmers().list(), list_farmers)
batch.execute(http=http)
"""
@util.positional(1)
def __init__(self, callback=None, batch_uri=None):
"""Constructor for a BatchHttpRequest.
Args:
callback: callable, A callback to be called for each response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no error occurred.
batch_uri: string, URI to send batch requests to.
"""
if batch_uri is None:
batch_uri = 'https://www.googleapis.com/batch'
self._batch_uri = batch_uri
# Global callback to be called for each individual response in the batch.
self._callback = callback
# A map from id to request.
self._requests = {}
# A map from id to callback.
self._callbacks = {}
# List of request ids, in the order in which they were added.
self._order = []
# The last auto generated id.
self._last_auto_id = 0
# Unique ID on which to base the Content-ID headers.
self._base_id = None
# A map from request id to (httplib2.Response, content) response pairs
self._responses = {}
# A map of id(Credentials) that have been refreshed.
self._refreshed_credentials = {}
def _refresh_and_apply_credentials(self, request, http):
"""Refresh the credentials and apply to the request.
Args:
request: HttpRequest, the request.
http: httplib2.Http, the global http object for the batch.
"""
# Force the credentials to refresh, but only once per refresh_token.
# If there is no http attached to the request then refresh the http passed
# in via execute().
creds = None
if request.http is not None and hasattr(request.http.request,
'credentials'):
creds = request.http.request.credentials
elif http is not None and hasattr(http.request, 'credentials'):
creds = http.request.credentials
if creds is not None:
if id(creds) not in self._refreshed_credentials:
creds.refresh(http)
self._refreshed_credentials[id(creds)] = 1
# Only apply the credentials if we are using the http object passed in,
# otherwise apply() will get called during _serialize_request().
if request.http is None or not hasattr(request.http.request,
'credentials'):
creds.apply(request.headers)
def _id_to_header(self, id_):
"""Convert an id to a Content-ID header value.
Args:
id_: string, identifier of individual request.
Returns:
A Content-ID header with the id_ encoded into it. A UUID is prepended to
the value because Content-ID headers are supposed to be universally
unique.
"""
if self._base_id is None:
self._base_id = uuid.uuid4()
return '<%s+%s>' % (self._base_id, urllib.quote(id_))
def _header_to_id(self, header):
"""Convert a Content-ID header value to an id.
Presumes the Content-ID header conforms to the format that _id_to_header()
returns.
Args:
header: string, Content-ID header value.
Returns:
The extracted id value.
Raises:
BatchError if the header is not in the expected format.
"""
if header[0] != '<' or header[-1] != '>':
raise BatchError("Invalid value for Content-ID: %s" % header)
if '+' not in header:
raise BatchError("Invalid value for Content-ID: %s" % header)
base, id_ = header[1:-1].rsplit('+', 1)
return urllib.unquote(id_)
def _serialize_request(self, request):
"""Convert an HttpRequest object into a string.
Args:
request: HttpRequest, the request to serialize.
Returns:
The request as a string in application/http format.
"""
# Construct status line
parsed = urlparse.urlparse(request.uri)
request_line = urlparse.urlunparse(
(None, None, parsed.path, parsed.params, parsed.query, None)
)
status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
major, minor = request.headers.get('content-type', 'application/json').split('/')
msg = MIMENonMultipart(major, minor)
headers = request.headers.copy()
if request.http is not None and hasattr(request.http.request,
'credentials'):
request.http.request.credentials.apply(headers)
# MIMENonMultipart adds its own Content-Type header.
if 'content-type' in headers:
del headers['content-type']
for key, value in headers.iteritems():
msg[key] = value
msg['Host'] = parsed.netloc
msg.set_unixfrom(None)
if request.body is not None:
msg.set_payload(request.body)
msg['content-length'] = str(len(request.body))
# Serialize the mime message.
fp = StringIO.StringIO()
# maxheaderlen=0 means don't line wrap headers.
g = Generator(fp, maxheaderlen=0)
g.flatten(msg, unixfrom=False)
body = fp.getvalue()
# Strip off the \n\n that the MIME lib tacks onto the end of the payload.
if request.body is None:
body = body[:-2]
return status_line.encode('utf-8') + body
def _deserialize_response(self, payload):
"""Convert string into httplib2 response and content.
Args:
payload: string, headers and body as a string.
Returns:
A pair (resp, content), such as would be returned from httplib2.request.
"""
# Strip off the status line
status_line, payload = payload.split('\n', 1)
protocol, status, reason = status_line.split(' ', 2)
# Parse the rest of the response
parser = FeedParser()
parser.feed(payload)
msg = parser.close()
msg['status'] = status
# Create httplib2.Response from the parsed headers.
resp = httplib2.Response(msg)
resp.reason = reason
resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
content = payload.split('\r\n\r\n', 1)[1]
return resp, content
def _new_id(self):
"""Create a new id.
Auto incrementing number that avoids conflicts with ids already used.
Returns:
string, a new unique id.
"""
self._last_auto_id += 1
while str(self._last_auto_id) in self._requests:
self._last_auto_id += 1
return str(self._last_auto_id)
@util.positional(2)
def add(self, request, callback=None, request_id=None):
"""Add a new request.
Every callback added will be paired with a unique id, the request_id. That
unique id will be passed back to the callback when the response comes back
from the server. The default behavior is to have the library generate its
own unique id. If the caller passes in a request_id then they must ensure
uniqueness for each request_id, and if the ids are not unique an exception
is raised. Callers should either supply all request_ids or never supply a
request id, to avoid such an error.
Args:
request: HttpRequest, Request to add to the batch.
callback: callable, A callback to be called for this response, of the
form callback(id, response, exception). The first parameter is the
request id, and the second is the deserialized response object. The
third is an apiclient.errors.HttpError exception object if an HTTP error
occurred while processing the request, or None if no errors occurred.
request_id: string, A unique id for the request. The id will be passed to
the callback with the response.
Returns:
None
Raises:
BatchError if a media request is added to a batch.
KeyError if the request_id is not unique.
"""
if request_id is None:
request_id = self._new_id()
if request.resumable is not None:
raise BatchError("Media requests cannot be used in a batch request.")
if request_id in self._requests:
raise KeyError("A request with this ID already exists: %s" % request_id)
self._requests[request_id] = request
self._callbacks[request_id] = callback
self._order.append(request_id)
def _execute(self, http, order, requests):
"""Serialize batch request, send to server, process response.
Args:
http: httplib2.Http, an http object to be used to make the request with.
order: list, list of request ids in the order they were added to the
batch.
requests: list, list of request objects to send.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
"""
message = MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for request_id in order:
request = requests[request_id]
msg = MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._id_to_header(request_id)
body = self._serialize_request(request)
msg.set_payload(body)
message.attach(msg)
body = message.as_string()
headers = {}
headers['content-type'] = ('multipart/mixed; '
'boundary="%s"') % message.get_boundary()
resp, content = http.request(self._batch_uri, method='POST', body=body,
headers=headers)
if resp.status >= 300:
raise HttpError(resp, content, uri=self._batch_uri)
# Now break out the individual responses and store each one.
boundary, _ = content.split(None, 1)
# Prepend with a content-type header so FeedParser can handle it.
header = 'content-type: %s\r\n\r\n' % resp['content-type']
for_parser = header + content
parser = FeedParser()
parser.feed(for_parser)
mime_response = parser.close()
if not mime_response.is_multipart():
raise BatchError("Response not in multipart/mixed format.", resp=resp,
content=content)
for part in mime_response.get_payload():
request_id = self._header_to_id(part['Content-ID'])
response, content = self._deserialize_response(part.get_payload())
self._responses[request_id] = (response, content)
@util.positional(1)
def execute(self, http=None):
"""Execute all the requests as a single batched HTTP request.
Args:
http: httplib2.Http, an http object to be used in place of the one the
HttpRequest request object was constructed with. If one isn't supplied
then use a http object from the requests in this batch.
Returns:
None
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
"""
# If http is not supplied use the first valid one given in the requests.
if http is None:
for request_id in self._order:
request = self._requests[request_id]
if request is not None:
http = request.http
break
if http is None:
raise ValueError("Missing a valid http object.")
self._execute(http, self._order, self._requests)
# Loop over all the requests and check for 401s. For each 401 request the
# credentials should be refreshed and then sent again in a separate batch.
redo_requests = {}
redo_order = []
for request_id in self._order:
resp, content = self._responses[request_id]
if resp['status'] == '401':
redo_order.append(request_id)
request = self._requests[request_id]
self._refresh_and_apply_credentials(request, http)
redo_requests[request_id] = request
if redo_requests:
self._execute(http, redo_order, redo_requests)
# Now process all callbacks, passing an HttpError exception object to the
# callback for any request that returned a non-2xx response.
for request_id in self._order:
resp, content = self._responses[request_id]
request = self._requests[request_id]
callback = self._callbacks[request_id]
response = None
exception = None
try:
if resp.status >= 300:
raise HttpError(resp, content, uri=request.uri)
response = request.postproc(resp, content)
except HttpError, e:
exception = e
if callback is not None:
callback(request_id, response, exception)
if self._callback is not None:
self._callback(request_id, response, exception)
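# Hedged sketch complementing the class docstring: per-request callbacks
# and an explicit request id. 'service' is a hypothetical discovery-built
# client and 'http' an authorized httplib2.Http instance.
def _example_batch(service, http):
  def on_animals(request_id, response, exception):
    if exception is None:
      print 'Request %s succeeded.' % request_id
  batch = BatchHttpRequest()
  batch.add(service.animals().list(), callback=on_animals,
            request_id='animals')
  batch.execute(http=http)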
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content, opt_expected_body) that should be
returned when that method is called. None may also be passed in for the
httplib2.Response, in which case a 200 OK response will be generated.
If an opt_expected_body (str or dict) is provided, it will be compared to
the body and UnexpectedBodyError will be raised on inequality.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'plus.activities.get': (None, response),
}
)
apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content or raise an exception
if check_unexpected is set to True. The methodId is taken from the rpcName
in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses, check_unexpected=False):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpResponse.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
check_unexpected - A boolean setting whether or not UnexpectedMethodError
should be raised on unsupplied method.
"""
self.responses = responses
self.check_unexpected = check_unexpected
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None, resumable=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
response = self.responses[methodId]
resp, content = response[:2]
if len(response) > 2:
# Test the body against the supplied expected_body.
expected_body = response[2]
if bool(expected_body) != bool(body):
# Not expecting a body and provided one
# or expecting a body and not provided one.
raise UnexpectedBodyError(expected_body, body)
if isinstance(expected_body, str):
expected_body = simplejson.loads(expected_body)
body = simplejson.loads(body)
if body != expected_body:
raise UnexpectedBodyError(expected_body, body)
return HttpRequestMock(resp, content, postproc)
elif self.check_unexpected:
raise UnexpectedMethodError(methodId=methodId)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
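# Hedged sketch of RequestMockBuilder in a unit test. The method id and
# response body are illustrative; execute() returns whatever the postproc
# produces, here the raw content string.
def _example_request_mock():
  builder = RequestMockBuilder({
      'plus.activities.get': (None, '{"id": "123"}'),
  })
  request = builder(None, lambda resp, content: content,
                    'https://www.example.com/',
                    methodId='plus.activities.get')
  return request.execute()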
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename=None, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': '200 OK'}
if filename:
f = open(filename, 'r')
self.data = f.read()
f.close()
else:
self.data = None
self.response_headers = headers
self.headers = None
self.uri = None
self.method = None
self.body = None
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
self.uri = uri
self.method = method
self.body = body
self.headers = headers
return httplib2.Response(self.response_headers), self.data
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behaviours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers in
the response body, encoded as JSON
'echo_request_body' means return the request body in the response body
'echo_request_uri' means return the request uri in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
self.follow_redirects = True
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = simplejson.dumps(headers)
elif content == 'echo_request_body':
if hasattr(body, 'read'):
content = body.read()
else:
content = body
elif content == 'echo_request_uri':
content = uri
return httplib2.Response(resp), content
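# Hedged sketch: HttpMockSequence is convenient for exercising retry logic,
# e.g. a 500 followed by a 200, which is the shape the retry loops in
# execute() and next_chunk() expect.
def _example_mock_sequence():
  http = HttpMockSequence([
      ({'status': '500'}, ''),
      ({'status': '200'}, '{"ok": true}'),
  ])
  resp, content = http.request('http://example.com')  # pops the 500
  resp, content = http.request('http://example.com')  # then the 200
  return resp.status, content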
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http: An instance of httplib2.Http, or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set when doing auth; this is for the
rare cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http: An instance of httplib2.Http, or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h)
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
logging.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
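# Hedged sketch: the two wrappers above compose, since each one simply
# replaces http.request with a closure over the previous implementation.
def _example_wrapped_http():
  h = httplib2.Http()
  h = set_user_agent(h, 'my-app-name/6.0')
  h = tunnel_patch(h)
  return h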
| mpl-2.0 |
kylerbrown/bark | tests/test_spikes.py | 2 | 2232 | import numpy as np
import bark
import pytest
from scipy.signal import argrelextrema
from bark.tools.datspike import main, spikes, thres_extrema, stream_spikes
data0 = np.array([[0], [1], [0], ])
data1 = np.array([[0, 0], [1, 0], [0, 0], ])
data2 = np.array([[0], [1], [1], [0], ])
def test_thres_extrema():
rs, = argrelextrema(data1[:, 0], lambda x, y: thres_extrema(x, y, 0.1))
assert len(rs) == 1
assert rs == 1
def test_spikes_1():
spks = list(spikes(data1,
start_sample=0,
threshs=[0.1, 0.1],
pad_len=0,
order=1))
assert len(spks) == 1
assert spks[0][0] == 0
assert spks[0][1] == 1
def test_stream_spikes():
data = np.arange(100).reshape(-1, 1) % 10
spikes = np.array(list(stream_spikes(
bark.stream.Stream(data,
sr=4,
chunksize=6),
threshs=[0.1],
pad_len=5,
order=3)))
answer = np.arange(9, 100, 10)
assert len(spikes) == len(answer)
assert spikes[0][0] == 0
assert spikes[0][1] == 9
assert np.allclose(spikes[:, 1], np.arange(9, 100, 10))
def test_min_distance():
# Same as the stream test, but only every other peak should be detected
# due to a minimum distance of 15.
data = np.arange(100).reshape(-1, 1) % 10
spikes = np.array(list(stream_spikes(
bark.stream.Stream(data,
sr=4,
chunksize=6),
threshs=[0.1],
pad_len=5,
order=3,
min_dist=15)))
answer = np.arange(9, 100, 20)
assert len(spikes) == len(answer)
assert spikes[0][0] == 0
assert spikes[0][1] == 9
assert np.allclose(spikes[:, 1], np.arange(9, 100, 20))
def test_main(tmpdir):
csvfile = str(tmpdir.join('test.csv'))
datfile = str(tmpdir.join('test.dat'))
data = np.arange(100).reshape(-1, 1) % 10
bark.write_sampled(datfile, data, sampling_rate=10)
main(datfile, csvfile, .1, 3)
result = bark.read_events(csvfile)
assert 'start' in result.data.columns
assert 'channel' in result.data.columns
assert np.allclose(result.data.start, np.arange(9, 100, 10)/10)
| gpl-2.0 |
qtproject/qt-labs-qbs | src/3rdparty/python/lib/python2.7/site-packages/ds_store/store.py | 11 | 45209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import binascii
import struct
import biplist
import mac_alias
try:
next
except NameError:
next = lambda x: x.next()
try:
unicode
except NameError:
unicode = str
from . import buddy
class ILocCodec(object):
@staticmethod
def encode(point):
return struct.pack(b'>IIII', point[0], point[1],
0xffffffff, 0xffff0000)
@staticmethod
def decode(bytesData):
if isinstance(bytesData, bytearray):
x, y = struct.unpack_from(b'>II', bytes(bytesData[:8]))
else:
x, y = struct.unpack(b'>II', bytesData[:8])
return (x, y)
class PlistCodec(object):
@staticmethod
def encode(plist):
return biplist.writePlistToString(plist)
@staticmethod
def decode(bytes):
return biplist.readPlistFromString(bytes)
class BookmarkCodec(object):
@staticmethod
def encode(bmk):
return bmk.to_bytes()
@staticmethod
def decode(bytes):
return mac_alias.Bookmark.from_bytes(bytes)
# This list tells the code how to decode particular kinds of entry in the
# .DS_Store file. This is really a convenience, and we currently only
# support a tiny subset of the possible entry types.
codecs = {
b'Iloc': ILocCodec,
b'bwsp': PlistCodec,
b'lsvp': PlistCodec,
b'lsvP': PlistCodec,
b'icvp': PlistCodec,
b'pBBk': BookmarkCodec
}
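# Hedged sketch (not part of the library): extra four-character codes can be
# handled by adding an object with encode/decode staticmethods to the codecs
# dictionary above. The 'fwi0' code and pass-through behaviour here are
# assumptions for illustration only.
class _ExampleRawCodec(object):
    @staticmethod
    def encode(value):
        return value

    @staticmethod
    def decode(bytesData):
        return bytesData

# codecs[b'fwi0'] = _ExampleRawCodec  # opt in explicitly if desired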
class DSStoreEntry(object):
"""Holds the data from an entry in a ``.DS_Store`` file. Note that this is
not meant to represent the entry itself---i.e. if you change the type
or value, your changes will *not* be reflected in the underlying file.
If you want to make a change, you should either use the :class:`DSStore`
object's :meth:`DSStore.insert` method (which will replace a key if it
already exists), or the mapping access mode for :class:`DSStore` (often
simpler anyway).
"""
def __init__(self, filename, code, typecode, value=None):
if str != bytes and type(filename) == bytes:
filename = filename.decode('utf-8')
if not isinstance(code, bytes):
code = code.encode('latin_1')
self.filename = filename
self.code = code
self.type = typecode
self.value = value
@classmethod
def read(cls, block):
"""Read a ``.DS_Store`` entry from the containing Block"""
# First read the filename
nlen = block.read(b'>I')[0]
filename = block.read(2 * nlen).decode('utf-16be')
# Next, read the code and type
code, typecode = block.read(b'>4s4s')
# Finally, read the data
if typecode == b'bool':
value = block.read(b'>?')[0]
elif typecode == b'long' or typecode == b'shor':
value = block.read(b'>I')[0]
elif typecode == b'blob':
vlen = block.read(b'>I')[0]
value = block.read(vlen)
codec = codecs.get(code, None)
if codec:
value = codec.decode(value)
typecode = codec
elif typecode == b'ustr':
vlen = block.read(b'>I')[0]
value = block.read(2 * vlen).decode('utf-16be')
elif typecode == b'type':
value = block.read(b'>4s')[0]
elif typecode == b'comp' or typecode == b'dutc':
value = block.read(b'>Q')[0]
else:
raise ValueError('Unknown type code "%s"' % typecode)
return DSStoreEntry(filename, code, typecode, value)
def __lt__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
return (sfl < ofl
or (self.filename == other.filename
and self.code < other.code))
def __le__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
return (sfl < ofl
or (sfl == ofl
and self.code <= other.code))
def __eq__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
return (sfl == ofl
and self.code == other.code)
def __ne__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
return (sfl != ofl
or self.code != other.code)
def __gt__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
selfCode = self.code
if str != bytes and type(selfCode) is bytes:
selfCode = selfCode.decode('utf-8')
otherCode = other.code
if str != bytes and type(otherCode) is bytes:
otherCode = otherCode.decode('utf-8')
return (sfl > ofl or (sfl == ofl and selfCode > otherCode))
def __ge__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
sfl = self.filename.lower()
ofl = other.filename.lower()
return (sfl > ofl
or (sfl == ofl
and self.code >= other.code))
def __cmp__(self, other):
if not isinstance(other, DSStoreEntry):
raise TypeError('Can only compare against other DSStoreEntry objects')
r = cmp(self.filename.lower(), other.filename.lower())
if r:
return r
return cmp(self.code, other.code)
def byte_length(self):
"""Compute the length of this entry, in bytes"""
utf16 = self.filename.encode('utf-16be')
l = 4 + len(utf16) + 8
if isinstance(self.type, unicode):
entry_type = self.type.encode('latin_1')
value = self.value
elif isinstance(self.type, (bytes, str)):
entry_type = self.type
value = self.value
else:
entry_type = b'blob'
value = self.type.encode(self.value)
if entry_type == b'bool':
l += 1
elif entry_type == b'long' or entry_type == b'shor':
l += 4
elif entry_type == b'blob':
l += 4 + len(value)
elif entry_type == b'ustr':
utf16 = value.encode('utf-16be')
l += 4 + len(utf16)
elif entry_type == b'type':
l += 4
elif entry_type == b'comp' or entry_type == b'dutc':
l += 8
else:
raise ValueError('Unknown type code "%s"' % entry_type)
return l
def write(self, block, insert=False):
"""Write this entry to the specified Block"""
if insert:
w = block.insert
else:
w = block.write
if isinstance(self.type, unicode):
entry_type = self.type.encode('latin_1')
value = self.value
elif isinstance(self.type, (bytes, str)):
entry_type = self.type
value = self.value
else:
entry_type = b'blob'
value = self.type.encode(self.value)
utf16 = self.filename.encode('utf-16be')
w(b'>I', len(utf16) // 2)
w(utf16)
w(b'>4s4s', self.code, entry_type)
if entry_type == b'bool':
w(b'>?', value)
elif entry_type == b'long' or entry_type == b'shor':
w(b'>I', value)
elif entry_type == b'blob':
w(b'>I', len(value))
w(value)
elif entry_type == b'ustr':
utf16 = value.encode('utf-16be')
w(b'>I', len(utf16) // 2)
w(utf16)
elif entry_type == b'type':
if isinstance(value, unicode):
value = value.encode('latin_1')
w(b'>4s', value)
elif entry_type == b'comp' or entry_type == b'dutc':
w(b'>Q', value)
else:
raise ValueError('Unknown type code "%s"' % entry_type)
def __repr__(self):
return '<%s %s>' % (self.filename, self.code)
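# Hedged usage sketch for the mapping interface described in the DSStore
# docstring below; the path is a placeholder. Wrapping it in a function lets
# the forward reference to DSStore resolve at call time.
def _example_ds_store_usage(path='.DS_Store'):
    with DSStore.open(path, 'r+') as d:
        d['foobar.dat']['Iloc'] = (128, 128)
        return d['foobar.dat']['Iloc']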
class DSStore(object):
"""Python interface to a ``.DS_Store`` file. Works by manipulating the file
on the disk---so this code will work with ``.DS_Store`` files for *very*
large directories.
A :class:`DSStore` object can be used as if it was a mapping, e.g.::
d['foobar.dat']['Iloc']
will fetch the "Iloc" record for "foobar.dat", or raise :class:`KeyError` if
there is no such record. If used in this manner, the :class:`DSStore` object
will return (type, value) tuples, unless the type is "blob" and the module
knows how to decode it.
Currently, we know how to decode "Iloc", "bwsp", "lsvp", "lsvP" and "icvp"
blobs. "Iloc" decodes to an (x, y) tuple, while the others are all decoded
using ``biplist``.
Assignment also works, e.g.::
d['foobar.dat']['note'] = ('ustr', u'Hello World!')
as does deletion with ``del``::
del d['foobar.dat']['note']
This is usually going to be the most convenient interface, though
occasionally (for instance when creating a new ``.DS_Store`` file) you
may wish to drop down to using :class:`DSStoreEntry` objects directly."""
def __init__(self, store):
self._store = store
self._superblk = self._store['DSDB']
with self._get_block(self._superblk) as s:
self._rootnode, self._levels, self._records, \
self._nodes, self._page_size = s.read(b'>IIIII')
self._min_usage = 2 * self._page_size // 3
self._dirty = False
@classmethod
def open(cls, file_or_name, mode='r+', initial_entries=None):
"""Open a ``.DS_Store`` file; pass either a Python file object, or a
filename in the ``file_or_name`` argument and a file access mode in
the ``mode`` argument. If you are creating a new file using the "w"
or "w+" modes, you may also specify a list of entries with which
to initialise the file."""
store = buddy.Allocator.open(file_or_name, mode)
if mode == 'w' or mode == 'w+':
superblk = store.allocate(20)
store['DSDB'] = superblk
page_size = 4096
if not initial_entries:
root = store.allocate(page_size)
with store.get_block(root) as rootblk:
rootblk.zero_fill()
with store.get_block(superblk) as s:
s.write(b'>IIIII', root, 0, 0, 1, page_size)
else:
# Make sure they're in sorted order
initial_entries = list(initial_entries)
initial_entries.sort()
# Construct the tree
current_level = initial_entries
next_level = []
levels = []
ptr_size = 0
node_count = 0
while True:
total = 8
nodes = []
node = []
for e in current_level:
new_total = total + ptr_size + e.byte_length()
if new_total > page_size:
nodes.append(node)
next_level.append(e)
total = 8
node = []
else:
total = new_total
node.append(e)
if node:
nodes.append(node)
node_count += len(nodes)
levels.append(nodes)
if len(nodes) == 1:
break
current_level = next_level
next_level = []
ptr_size = 4
# Allocate nodes
ptrs = [store.allocate(page_size) for n in range(node_count)]
# Generate nodes
pointers = []
prev_pointers = None
for level in levels:
ppndx = 0
lptrs = ptrs[-len(level):]
del ptrs[-len(level):]
for node in level:
ndx = lptrs.pop(0)
if prev_pointers is None:
with store.get_block(ndx) as block:
block.write(b'>II', 0, len(node))
for e in node:
e.write(block)
else:
next_node = prev_pointers[ppndx + len(node)]
node_ptrs = prev_pointers[ppndx:ppndx+len(node)]
with store.get_block(ndx) as block:
block.write(b'>II', next_node, len(node))
for ptr, e in zip(node_ptrs, node):
block.write(b'>I', ptr)
e.write(block)
pointers.append(ndx)
prev_pointers = pointers
pointers = []
root = prev_pointers[0]
with store.get_block(superblk) as s:
s.write(b'>IIIII', root, len(levels), len(initial_entries),
node_count, page_size)
return DSStore(store)
def _get_block(self, number):
return self._store.get_block(number)
def flush(self):
"""Flush any dirty data back to the file."""
if self._dirty:
self._dirty = False
with self._get_block(self._superblk) as s:
s.write(b'>IIIII', self._rootnode, self._levels, self._records,
self._nodes, self._page_size)
self._store.flush()
def close(self):
"""Flush dirty data and close the underlying file."""
self.flush()
self._store.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
# Internal B-Tree nodes look like this:
#
# [ next | count | (ptr0 | rec0) | (ptr1 | rec1) ... (ptrN | recN) ]
# Leaf nodes look like this:
#
# [ 0 | count | rec0 | rec1 ... recN ]
# Iterate over the tree, starting at `node'
def _traverse(self, node):
if node is None:
node = self._rootnode
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
if next_node:
for n in range(count):
ptr = block.read(b'>I')[0]
for e in self._traverse(ptr):
yield e
e = DSStoreEntry.read(block)
yield e
for e in self._traverse(next_node):
yield e
else:
for n in range(count):
e = DSStoreEntry.read(block)
yield e
# Display the data in `node'
def _dump_node(self, node):
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
print('next: %u\ncount: %u\n' % (next_node, count))
for n in range(count):
if next_node:
ptr = block.read(b'>I')[0]
print('%8u ' % ptr, end=' ')
else:
print(' ', end=' ')
e = DSStoreEntry.read(block)
print(e, ' (%u)' % e.byte_length())
print('used: %u' % block.tell())
# Display the data in the super block
def _dump_super(self):
print('root: %u\nlevels: %u\nrecords: %u\nnodes: %u\npage-size: %u' \
% (self._rootnode, self._levels, self._records,
self._nodes, self._page_size))
# Splits entries across two blocks, returning one pivot
#
# Tries to balance the block usage across the two as best it can
def _split2(self, blocks, entries, pointers, before, internal):
left_block = blocks[0]
right_block = blocks[1]
count = len(entries)
# Find the feasible splits
best_split = None
best_diff = None
total = before[count]
if 8 + total <= self._page_size:
# We can use a *single* node for this
best_split = count
else:
# Split into two nodes
for n in range(1, count - 1):
left_size = 8 + before[n]
right_size = 8 + total - before[n + 1]
if left_size > self._page_size:
break
if right_size > self._page_size:
continue
diff = abs(left_size - right_size)
if best_split is None or diff < best_diff:
best_split = n
best_diff = diff
if best_split is None:
return None
# Write the nodes
left_block.seek(0)
if internal:
next_node = pointers[best_split]
else:
next_node = 0
left_block.write(b'>II', next_node, best_split)
for n in range(best_split):
if internal:
left_block.write(b'>I', pointers[n])
entries[n].write(left_block)
left_block.zero_fill()
if best_split == count:
return []
right_block.seek(0)
if internal:
next_node = pointers[count]
else:
next_node = 0
right_block.write(b'>II', next_node, count - best_split - 1)
for n in range(best_split + 1, count):
if internal:
right_block.write(b'>I', pointers[n])
entries[n].write(right_block)
right_block.zero_fill()
pivot = entries[best_split]
return [pivot]
def _split(self, node, entry, right_ptr=0):
self._nodes += 1
self._dirty = True
new_right = self._store.allocate(self._page_size)
with self._get_block(node) as block, \
self._get_block(new_right) as right_block:
# First, measure and extract all the elements
entry_size = entry.byte_length()
entry_pos = None
next_node, count = block.read(b'>II')
if next_node:
entry_size += 4
pointers = []
entries = []
before = []
total = 0
for n in range(count):
pos = block.tell()
if next_node:
ptr = block.read(b'>I')[0]
pointers.append(ptr)
e = DSStoreEntry.read(block)
if e > entry:
entry_pos = n
entries.append(entry)
pointers.append(right_ptr)
before.append(total)
total += entry_size
entries.append(e)
before.append(total)
total += block.tell() - pos
before.append(total)
if next_node:
pointers.append(next_node)
pivot = self._split2([block, right_block],
entries, pointers, before,
bool(next_node))[0]
self._records += 1
self._nodes += 1
self._dirty = True
return (pivot, new_right)
# Allocate a new root node containing the element `pivot' and the pointers
# `left' and `right'
def _new_root(self, left, pivot, right):
new_root = self._store.allocate(self._page_size)
with self._get_block(new_root) as block:
block.write(b'>III', right, 1, left)
pivot.write(block)
self._rootnode = new_root
self._levels += 1
self._nodes += 1
self._dirty = True
# Insert an entry into an inner node; `path' is the path from the root
# to `node', not including `node' itself. `right_ptr' is the new node
# pointer (inserted to the RIGHT of `entry')
def _insert_inner(self, path, node, entry, right_ptr):
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
insert_pos = None
insert_ndx = None
n = 0
while n < count:
pos = block.tell()
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
if e == entry:
if n == count - 1:
right_ptr = next_node
next_node = ptr
block.seek(pos)
else:
right_ptr = block.read(b'>I')[0]
block.seek(pos + 4)
insert_pos = pos
insert_ndx = n
block.delete(e.byte_length() + 4)
count -= 1
self._records += 1
self._dirty = True
continue
elif insert_pos is None and e > entry:
insert_pos = pos
insert_ndx = n
n += 1
if insert_pos is None:
insert_pos = block.tell()
insert_ndx = count
remaining = self._page_size - block.tell()
if remaining < entry.byte_length() + 4:
pivot, new_right = self._split(node, entry, right_ptr)
if path:
self._insert_inner(path[:-1], path[-1], pivot, new_right)
else:
self._new_root(node, pivot, new_right)
else:
if insert_ndx == count:
block.seek(insert_pos)
block.write(b'>I', next_node)
entry.write(block)
next_node = right_ptr
else:
block.seek(insert_pos + 4)
entry.write(block, True)
block.insert(b'>I', right_ptr)
block.seek(0)
count += 1
block.write(b'>II', next_node, count)
self._records += 1
self._dirty = True
# Insert `entry' into the leaf node `node'
def _insert_leaf(self, path, node, entry):
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
insert_pos = None
insert_ndx = None
n = 0
while n < count:
pos = block.tell()
e = DSStoreEntry.read(block)
if e == entry:
insert_pos = pos
insert_ndx = n
block.seek(pos)
block.delete(e.byte_length())
count -= 1
self._records += 1
self._dirty = True
continue
elif insert_pos is None and e > entry:
insert_pos = pos
insert_ndx = n
n += 1
if insert_pos is None:
insert_pos = block.tell()
insert_ndx = count
remaining = self._page_size - block.tell()
if remaining < entry.byte_length():
pivot, new_right = self._split(node, entry)
if path:
self._insert_inner(path[:-1], path[-1], pivot, new_right)
else:
self._new_root(node, pivot, new_right)
else:
block.seek(insert_pos)
entry.write(block, True)
block.seek(0)
count += 1
block.write(b'>II', next_node, count)
self._records += 1
self._dirty = True
def insert(self, entry):
"""Insert ``entry`` (which should be a :class:`DSStoreEntry`)
into the B-Tree."""
path = []
node = self._rootnode
while True:
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
if next_node:
for n in range(count):
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
if entry < e:
next_node = ptr
break
elif entry == e:
# If we find an existing entry the same, replace it
self._insert_inner(path, node, entry, None)
return
path.append(node)
node = next_node
else:
self._insert_leaf(path, node, entry)
return
# Return usage information for the specified `node'
def _block_usage(self, node):
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
for n in range(count):
if next_node:
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
used = block.tell()
return (count, used)
# Splits entries across three blocks, returning two pivots
def _split3(self, blocks, entries, pointers, before, internal):
count = len(entries)
# Find the feasible splits
best_split = None
best_diff = None
total = before[count]
for n in range(1, count - 3):
left_size = 8 + before[n]
remaining = 16 + total - before[n + 1]
if left_size > self._page_size:
break
if remaining > 2 * self._page_size:
continue
for m in range(n + 2, count - 1):
mid_size = 8 + before[m] - before[n + 1]
right_size = 8 + total - before[m + 1]
if mid_size > self._page_size:
break
if right_size > self._page_size:
continue
diff = abs(left_size - mid_size) * abs(right_size - mid_size)
if best_split is None or diff < best_diff:
best_split = (n, m, count)
best_diff = diff
if best_split is None:
return None
# Write the nodes
prev_split = -1
for block, split in zip(blocks, best_split):
block.seek(0)
if internal:
next_node = pointers[split]
else:
next_node = 0
block.write(b'>II', next_node, split)
for n in range(prev_split + 1, split):
if internal:
block.write(b'>I', pointers[n])
entries[n].write(block)
block.zero_fill()
prev_split = split
return (entries[best_split[0]], entries[best_split[1]])
# Extract all of the entries from the specified list of `blocks',
# separating them by the specified `pivots'. Also computes the
# amount of space used before each entry.
def _extract(self, blocks, pivots):
pointers = []
entries = []
before = []
total = 0
ppivots = pivots + [None]
for b,p in zip(blocks, ppivots):
b.seek(0)
next_node, count = b.read(b'>II')
for n in range(count):
pos = b.tell()
if next_node:
ptr = b.read(b'>I')[0]
pointers.append(ptr)
e = DSStoreEntry.read(b)
entries.append(e)
before.append(total)
total += b.tell() - pos
if next_node:
pointers.append(next_node)
if p:
entries.append(p)
before.append(total)
total += p.byte_length()
if next_node:
total += 4
before.append(total)
return (entries, pointers, before)
# Rebalance the specified `node', whose path from the root is `path'.
def _rebalance(self, path, node):
# Can't rebalance the root
if not path:
return
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
with self._get_block(path[-1]) as parent:
# Find the left and right siblings and respective pivots
parent_next, parent_count = parent.read(b'>II')
left_pos = None
left_node = None
left_pivot = None
node_pos = None
right_pos = None
right_node = None
right_pivot = None
prev_e = prev_ptr = prev_pos = None
for n in range(parent_count):
pos = parent.tell()
ptr = parent.read(b'>I')[0]
e = DSStoreEntry.read(parent)
if ptr == node:
node_pos = pos
right_pivot = e
left_pos = prev_pos
left_pivot = prev_e
left_node = prev_ptr
elif prev_ptr == node:
right_node = ptr
right_pos = pos
break
prev_e = e
prev_ptr = ptr
prev_pos = pos
if parent_next == node:
node_pos = parent.tell()
left_pos = prev_pos
left_pivot = prev_e
left_node = prev_ptr
elif right_node is None:
right_node = parent_next
right_pos = parent.tell()
parent_used = parent.tell()
if left_node and right_node:
with self._get_block(left_node) as left, \
self._get_block(right_node) as right:
blocks = [left, block, right]
pivots = [left_pivot, right_pivot]
entries, pointers, before = self._extract(blocks, pivots)
# If there's a chance that we could use two pages instead
# of three, go for it
pivots = self._split2(blocks, entries, pointers,
before, bool(next_node))
if pivots is None:
ptrs = [left_node, node, right_node]
pivots = self._split3(blocks, entries, pointers,
before, bool(next_node))
else:
if pivots:
ptrs = [left_node, node]
else:
ptrs = [left_node]
self._store.release(node)
self._nodes -= 1
node = left_node
self._store.release(right_node)
self._nodes -= 1
self._dirty = True
# Remove the pivots from the parent
with self._get_block(path[-1]) as parent:
if right_node == parent_next:
parent.seek(left_pos)
parent.delete(right_pos - left_pos)
parent_next = left_node
else:
parent.seek(left_pos + 4)
parent.delete(right_pos - left_pos)
parent.seek(0)
parent_count -= 2
parent.write(b'>II', parent_next, parent_count)
self._records -= 2
# Replace with those in pivots
for e,rp in zip(pivots, ptrs[1:]):
self._insert_inner(path[:-1], path[-1], e, rp)
elif left_node:
with self._get_block(left_node) as left:
blocks = [left, block]
pivots = [left_pivot]
entries, pointers, before = self._extract(blocks, pivots)
pivots = self._split2(blocks, entries, pointers,
before, bool(next_node))
# Remove the pivot from the parent
with self._get_block(path[-1]) as parent:
if node == parent_next:
parent.seek(left_pos)
parent.delete(node_pos - left_pos)
parent_next = left_node
else:
parent.seek(left_pos + 4)
parent.delete(node_pos - left_pos)
parent.seek(0)
parent_count -= 1
parent.write(b'>II', parent_next, parent_count)
self._records -= 1
# Replace the pivot
if pivots:
self._insert_inner(path[:-1], path[-1], pivots[0], node)
elif right_node:
with self._get_block(right_node) as right:
blocks = [block, right]
pivots = [right_pivot]
entries, pointers, before = self._extract(blocks, pivots)
pivots = self._split2(blocks, entries, pointers,
before, bool(next_node))
# Remove the pivot from the parent
with self._get_block(path[-1]) as parent:
if right_node == parent_next:
parent.seek(node_pos)
parent.delete(right_pos - node_pos)
parent_next = node
else:
parent.seek(node_pos + 4)
parent.delete(right_pos - node_pos)
parent.seek(0)
parent_count -= 1
parent.write(b'>II', parent_next, parent_count)
self._records -= 1
# Replace the pivot
if pivots:
self._insert_inner(path[:-1], path[-1], pivots[0],
right_node)
# If the parent was the root and is now empty, `node' becomes the new root
if len(path) == 1 and not parent_count:
self._store.release(path[-1])
self._nodes -= 1
self._dirty = True
self._rootnode = node
else:
count, used = self._block_usage(path[-1])
if used < self._page_size // 2:
self._rebalance(path[:-1], path[-1])
# Delete from the leaf node `node'. `filename_lc' has already been
# lower-cased.
def _delete_leaf(self, node, filename_lc, code):
found = False
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
for n in range(count):
pos = block.tell()
e = DSStoreEntry.read(block)
if e.filename.lower() == filename_lc \
and (code is None or e.code == code):
block.seek(pos)
block.delete(e.byte_length())
found = True
# Decrementing here does not affect the loop: range(count) was
# evaluated once at loop entry, so this is not a bug
count -= 1
self._records -= 1
self._dirty = True
if found:
used = block.tell()
block.seek(0)
block.write(b'>II', next_node, count)
return used < self._page_size // 2
else:
return False
# Remove the largest entry from the subtree starting at `node' (with
# path from root `path'). Returns a tuple (rebalance, entry) where
# rebalance is either None if no rebalancing is required, or a
# (path, node) tuple giving the details of the node to rebalance.
def _take_largest(self, path, node):
path = list(path)
rebalance = None
while True:
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
if next_node:
path.append(node)
node = next_node
continue
for n in range(count):
pos = block.tell()
e = DSStoreEntry.read(block)
count -= 1
block.seek(0)
block.write(b'>II', next_node, count)
if pos < self._page_size // 2:
rebalance = (path, node)
break
return rebalance, e
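# --- Illustrative sketch (not part of the original file) ---
# _take_largest implements the classic B-tree deletion trick: a key
# removed from an inner node is replaced by the largest key of its left
# subtree (the in-order predecessor).  A toy model on nested lists,
# purely to show the shape of the descent:
def _demo_take_largest(tree):
    # tree is either a sorted list (leaf) or a (children, pivots) pair
    if isinstance(tree, list):
        return tree.pop()                    # rightmost key of a leaf
    children, pivots = tree
    return _demo_take_largest(children[-1])  # always descend rightmost

# inner = ([[1, 2], [4, 5]], [3])
# _demo_take_largest(inner) -> 5
# --- end sketch ---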
# Delete an entry from an inner node, `node'
def _delete_inner(self, path, node, filename_lc, code):
rebalance = False
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
for n in range(count):
pos = block.tell()
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
if e.filename.lower() == filename_lc \
and (code is None or e.code == code):
# Take the largest from the left subtree
rebalance, largest = self._take_largest(path, ptr)
# Delete this entry
if n == count - 1:
right_ptr = next_node
next_node = ptr
block.seek(pos)
else:
right_ptr = block.read(b'>I')[0]
block.seek(pos + 4)
block.delete(e.byte_length() + 4)
count -= 1
block.seek(0)
block.write(b'>II', next_node, count)
self._records -= 1
self._dirty = True
break
# Replace the pivot value
self._insert_inner(path, node, largest, right_ptr)
# Rebalance from the node we stole from
if rebalance:
self._rebalance(rebalance[0], rebalance[1])
return True
return False
def delete(self, filename, code):
"""Delete an item, identified by ``filename`` and ``code``
from the B-Tree."""
if isinstance(filename, DSStoreEntry):
code = filename.code
filename = filename.filename
# If we're deleting *every* node for "filename", we must recurse
if code is None:
###TODO: Fix this so we can do bulk deletes
raise ValueError('You must delete items individually. Sorry')
# Otherwise, we're deleting *one* specific node
filename_lc = filename.lower()
path = []
node = self._rootnode
while True:
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
if next_node:
for n in range(count):
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
e_lc = e.filename.lower()
if filename_lc < e_lc \
or (filename_lc == e_lc and code < e.code):
next_node = ptr
break
elif filename_lc == e_lc and code == e.code:
self._delete_inner(path, node, filename_lc, code)
return
path.append(node)
node = next_node
else:
if self._delete_leaf(node, filename_lc, code):
self._rebalance(path, node)
return
# Find implementation
def _find(self, node, filename_lc, code=None):
if code is not None and not isinstance(code, bytes):
code = code.encode('latin_1')
with self._get_block(node) as block:
next_node, count = block.read(b'>II')
if next_node:
for n in range(count):
ptr = block.read(b'>I')[0]
e = DSStoreEntry.read(block)
if filename_lc < e.filename.lower():
for e in self._find(ptr, filename_lc, code):
yield e
return
elif filename_lc == e.filename.lower():
if code is None or (code and code < e.code):
for e in self._find(ptr, filename_lc, code):
yield e
if code is None or code == e.code:
yield e
elif code < e.code:
return
for e in self._find(next_node, filename_lc, code):
yield e
else:
for n in range(count):
e = DSStoreEntry.read(block)
if filename_lc == e.filename.lower():
if code is None or code == e.code:
yield e
elif code < e.code:
return
def find(self, filename, code=None):
"""Returns a generator that will iterate over matching entries in
the B-Tree."""
if isinstance(filename, DSStoreEntry):
code = filename.code
filename = filename.filename
filename_lc = filename.lower()
return self._find(self._rootnode, filename_lc, code)
def __len__(self):
return self._records
def __iter__(self):
return self._traverse(self._rootnode)
class Partial(object):
"""This is used to implement indexing."""
def __init__(self, store, filename):
self._store = store
self._filename = filename
def __getitem__(self, code):
if code is None:
raise KeyError('no such key - [%s][None]' % self._filename)
if not isinstance(code, bytes):
code = code.encode('latin_1')
try:
item = next(self._store.find(self._filename, code))
except StopIteration:
raise KeyError('no such key - [%s][%s]' % (self._filename,
code))
if not isinstance(item.type, (bytes, str, unicode)):
return item.value
return (item.type, item.value)
def __setitem__(self, code, value):
if code is None:
raise KeyError('bad key - [%s][None]' % self._filename)
if not isinstance(code, bytes):
code = code.encode('latin_1')
codec = codecs.get(code, None)
if codec:
entry_type = codec
entry_value = value
else:
entry_type = value[0]
entry_value = value[1]
self._store.insert(DSStoreEntry(self._filename, code,
entry_type, entry_value))
def __delitem__(self, code):
if code is None:
raise KeyError('no such key - [%s][None]' % self._filename)
self._store.delete(self._filename, code)
def __iter__(self):
for item in self._store.find(self._filename):
yield item
def __getitem__(self, filename):
return self.Partial(self, filename)
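# --- Illustrative usage sketch (not part of the original file) ---
# The Partial helper above is what makes two-level indexing work.
# Assuming this class is the DSStore from the `ds_store' package (which
# exposes an open() classmethod -- not shown here), usage looks roughly
# like:
#
#     from ds_store import DSStore
#     with DSStore.open('.DS_Store', 'w+') as d:
#         d['bam']['Iloc'] = (128, 128)   # store -> Partial -> __setitem__
#         print(d['bam']['Iloc'])         # Partial.__getitem__ -> find()
#
# d['bam'] returns Partial(self, 'bam'); the second subscript dispatches
# to insert()/find() with a DSStoreEntry built from filename + code.
# --- end sketch ---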
| lgpl-2.1 |
ariakerstein/twitterFlaskClone | project/lib/python2.7/site-packages/pip/vcs/git.py | 280 | 7608 | from __future__ import absolute_import
import logging
import tempfile
import os.path
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warning(
"Could not find a tag or branch '%s', assuming commit.", rev,
)
return rev_options
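# --- Illustrative sketch (not part of the original file) ---
# The lookup above resolves a user-supplied rev against the refs map
# returned by get_refs(), trying the remote-tracking name first.  A
# standalone model of that resolution, with a made-up refs dict:
def _demo_resolve_rev(rev, revisions):
    origin_rev = 'origin/%s' % rev
    if origin_rev in revisions:
        return revisions[origin_rev]    # remote branch
    if rev in revisions:
        return revisions[rev]           # local tag or branch
    return rev                          # assume it names a commit

# _demo_resolve_rev('master', {'origin/master': 'abc123'}) -> 'abc123'
# _demo_resolve_rev('deadbeef', {}) -> 'deadbeef'
# --- end sketch ---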
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
self.run_command(['checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(
rev_options[0], dest, rev_options,
)
self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
self.run_command(
['checkout', '-q'] + rev_options,
cwd=dest,
)
#: repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
url = self.run_command(
['config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
output = self.run_command(['show-ref'],
show_stdout=False, cwd=location)
rv = {}
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
ref = ref.strip()
ref_name = None
if ref.startswith('refs/remotes/'):
ref_name = ref[len('refs/remotes/'):]
elif ref.startswith('refs/heads/'):
ref_name = ref[len('refs/heads/'):]
elif ref.startswith('refs/tags/'):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit.strip()
return rv
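# --- Illustrative sketch (not part of the original file) ---
# get_refs() parses `git show-ref` output, one "<sha> <refname>" per
# line.  The same stripping, on a hard-coded sample (hashes shortened
# for readability):
_sample = """\
1f2e3d refs/heads/master
4a5b6c refs/remotes/origin/develop
7d8e9f refs/tags/v1.0
"""
_rv = {}
for _line in _sample.strip().splitlines():
    _commit, _ref = _line.split(' ', 1)
    for _prefix in ('refs/remotes/', 'refs/heads/', 'refs/tags/'):
        if _ref.startswith(_prefix):
            _rv[_ref[len(_prefix):]] = _commit
            break
# _rv == {'master': '1f2e3d', 'origin/develop': '4a5b6c', 'v1.0': '7d8e9f'}
# --- end sketch ---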
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
refs = self.get_refs(location)
# refs maps names to commit hashes; we need the inverse
# if multiple names map to a single commit, we pick the first one
# alphabetically
names_by_commit = {}
for ref, commit in sorted(refs.items()):
if commit not in names_by_commit:
names_by_commit[commit] = ref
if current_rev in names_by_commit:
# It's a tag or branch.
name = names_by_commit[current_rev]
full_egg_name = (
'%s-%s' % (egg_project_name, self.translate_egg_surname(name))
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH, they sometimes don't
work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
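# --- Illustrative sketch (not part of the original file) ---
# The stub-URL round trip above can be traced with plain string
# operations; given a typical SCP-like GitHub URL:
_stub = 'git+git@github.com:user/repo.git'
_parseable = _stub.replace('git+', 'git+ssh://')  # make it urlsplit-friendly
# ... after the base class strips 'git+' and splits off any @rev ...
_restored = _parseable.replace('git+', '').replace('ssh://', '')
# _restored == 'git@github.com:user/repo.git', the stub form git itself wants
# --- end sketch ---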
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
vcs.register(Git)
| mit |
ABcDexter/cython | tests/run/withstat_py.py | 29 | 4281 | import sys
def typename(t):
name = type(t).__name__
if sys.version_info < (2,5):
if name == 'classobj' and issubclass(t, MyException):
name = 'type'
elif name == 'instance' and isinstance(t, MyException):
name = 'MyException'
return "<type '%s'>" % name
class MyException(Exception):
pass
class ContextManager(object):
def __init__(self, value, exit_ret = None):
self.value = value
self.exit_ret = exit_ret
def __exit__(self, a, b, tb):
print("exit %s %s %s" % (typename(a), typename(b), typename(tb)))
return self.exit_ret
def __enter__(self):
print("enter")
return self.value
def no_as():
"""
>>> no_as()
enter
hello
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value"):
print("hello")
def basic():
"""
>>> basic()
enter
value
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value") as x:
print(x)
def with_pass():
"""
>>> with_pass()
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager("value") as x:
pass
def with_return():
"""
>>> print(with_return())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
value
"""
with ContextManager("value") as x:
return x
def with_break():
"""
>>> print(with_break())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
a
"""
for c in list("abc"):
with ContextManager("value") as x:
break
print("FAILED")
return c
def with_continue():
"""
>>> print(with_continue())
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
c
"""
for c in list("abc"):
with ContextManager("value") as x:
continue
print("FAILED")
return c
def with_exception(exit_ret):
"""
>>> with_exception(None)
enter
value
exit <type 'type'> <type 'MyException'> <type 'traceback'>
outer except
>>> with_exception(True)
enter
value
exit <type 'type'> <type 'MyException'> <type 'traceback'>
"""
try:
with ContextManager("value", exit_ret=exit_ret) as value:
print(value)
raise MyException()
except:
print("outer except")
def with_real_lock():
"""
>>> with_real_lock()
about to acquire lock
holding lock
lock no longer held
"""
from threading import Lock
lock = Lock()
print("about to acquire lock")
with lock:
print("holding lock")
print("lock no longer held")
def functions_in_with():
"""
>>> f = functions_in_with()
enter
exit <type 'type'> <type 'MyException'> <type 'traceback'>
outer except
>>> f(1)[0]
1
>>> print(f(1)[1])
value
"""
try:
with ContextManager("value") as value:
def f(x): return x, value
make = lambda x:x()
raise make(MyException)
except:
print("outer except")
return f
def multitarget():
"""
>>> multitarget()
enter
1 2 3 4 5
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager((1, 2, (3, (4, 5)))) as (a, b, (c, (d, e))):
print('%s %s %s %s %s' % (a, b, c, d, e))
def tupletarget():
"""
>>> tupletarget()
enter
(1, 2, (3, (4, 5)))
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with ContextManager((1, 2, (3, (4, 5)))) as t:
print(t)
class GetManager(object):
def get(self, *args):
return ContextManager(*args)
def manager_from_expression():
"""
>>> manager_from_expression()
enter
1
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
2
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with GetManager().get(1) as x:
print(x)
g = GetManager()
with g.get(2) as x:
print(x)
| apache-2.0 |
nebril/fuel-web | nailgun/nailgun/db/migration/__init__.py | 5 | 3917 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import six
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from nailgun.db.sqlalchemy import db_str
def make_alembic_config(script_location, version_table, **kwargs):
config = alembic_config.Config(
os.path.join(os.path.dirname(__file__), 'alembic.ini'))
config.set_main_option('script_location', script_location)
config.set_main_option('sqlalchemy.url', db_str)
config.set_main_option('version_table', version_table)
for k, v in six.iteritems(kwargs):
config.set_main_option(k, v)
return config
def make_alembic_config_from_extension(extension):
return make_alembic_config(
extension.alembic_migrations_path(),
extension.alembic_table_version(),
table_prefix=extension.table_prefix())
# Alembic config for core components
ALEMBIC_CONFIG = make_alembic_config(
'nailgun.db.migration:alembic_migrations',
'alembic_version')
def do_alembic_command(cmd, config, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_stamp(cmd, config):
do_alembic_command(
cmd,
config,
config.params.revision,
sql=config.params.sql)
def do_revision(cmd, config):
do_alembic_command(
cmd,
config,
message=config.params.message,
autogenerate=config.params.autogenerate,
sql=config.params.sql)
def do_upgrade_downgrade(cmd, config):
params = config.params
if not params.revision and not params.delta:
raise SystemExit('You must provide a revision or relative delta')
if params.delta:
sign = '+' if params.name == 'upgrade' else '-'
revision = sign + str(params.delta)
else:
revision = params.revision
do_alembic_command(cmd, config, revision, sql=params.sql)
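# --- Illustrative sketch (not part of the original file) ---
# The delta handling above turns a relative step count into the revision
# strings alembic expects ('+N' to upgrade, '-N' to downgrade):
def _demo_relative_revision(name, delta):
    sign = '+' if name == 'upgrade' else '-'
    return sign + str(delta)

# _demo_relative_revision('upgrade', 2)   -> '+2'
# _demo_relative_revision('downgrade', 1) -> '-1'
# --- end sketch ---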
def do_upgrade_head_extensions():
from nailgun.extensions import get_all_extensions
for extension in get_all_extensions():
if extension.alembic_migrations_path():
config = make_alembic_config_from_extension(extension)
do_alembic_command('upgrade', config, 'head')
def do_upgrade_head_core():
do_alembic_command('upgrade', ALEMBIC_CONFIG, 'head')
def do_upgrade_head():
do_upgrade_head_core()
do_upgrade_head_extensions()
def action_migrate(config):
actions = {
'current': do_alembic_command,
'history': do_alembic_command,
'branches': do_alembic_command,
'upgrade': do_upgrade_downgrade,
'downgrade': do_upgrade_downgrade,
'stamp': do_stamp,
'revision': do_revision
}
actions[config.params.alembic_command](
config.params.alembic_command, config)
def action_migrate_alembic_core(params):
global ALEMBIC_CONFIG
config = ALEMBIC_CONFIG
config.params = params
action_migrate(config)
def action_migrate_alembic_extension(params, extension):
config = make_alembic_config_from_extension(extension)
config.params = params
action_migrate(config)
def drop_migration_meta(engine):
engine.execute("DROP TABLE IF EXISTS alembic_version")
| apache-2.0 |
loulich/Couchpotato | libs/guessit/transfo/guess_weak_episodes_rexps.py | 94 | 2184 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import Guess
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import weak_episode_rexps
import re
import logging
log = logging.getLogger(__name__)
def guess_weak_episodes_rexps(string, node):
if 'episodeNumber' in node.root.info:
return None, None
for rexp, span_adjust in weak_episode_rexps:
match = re.search(rexp, string, re.IGNORECASE)
if match:
metadata = match.groupdict()
span = (match.start() + span_adjust[0],
match.end() + span_adjust[1])
epnum = int(metadata['episodeNumber'])
if epnum > 100:
season, epnum = epnum // 100, epnum % 100
# episodes which have a season > 25 are most likely errors
# (Simpsons is at 23!)
if season > 25:
continue
return Guess({ 'season': season,
'episodeNumber': epnum },
confidence=0.6, raw=string[span[0]:span[1]]), span
else:
return Guess(metadata, confidence=0.3, raw=string[span[0]:span[1]]), span
return None, None
guess_weak_episodes_rexps.use_node = True
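# --- Illustrative sketch (not part of the original file) ---
# The season/episode folding above: a bare number like 2312 is read as
# season 23, episode 12, and accepted only if season <= 25.
_epnum = 2312
_season, _ep = divmod(_epnum, 100)   # same as (_epnum // 100, _epnum % 100)
# _season == 23, _ep == 12
# --- end sketch ---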
def process(mtree):
SingleNodeGuesser(guess_weak_episodes_rexps, 0.6, log).process(mtree)
| gpl-3.0 |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/mesa/src/src/gallium/drivers/svga/svgadump/svga_dump.py | 50 | 11879 | #!/usr/bin/env python
'''
Generates dumper for the SVGA 3D command stream using pygccxml.
Jose Fonseca <jfonseca@vmware.com>
'''
copyright = '''
/**********************************************************
* Copyright 2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
'''
import os
import sys
from pygccxml import parser
from pygccxml import declarations
from pygccxml.declarations import algorithm
from pygccxml.declarations import decl_visitor
from pygccxml.declarations import type_traits
from pygccxml.declarations import type_visitor
enums = True
class decl_dumper_t(decl_visitor.decl_visitor_t):
def __init__(self, instance = '', decl = None):
decl_visitor.decl_visitor_t.__init__(self)
self._instance = instance
self.decl = decl
def clone(self):
return decl_dumper_t(self._instance, self.decl)
def visit_class(self):
class_ = self.decl
assert self.decl.class_type in ('struct', 'union')
for variable in class_.variables():
if variable.name != '':
#print 'variable = %r' % variable.name
dump_type(self._instance + '.' + variable.name, variable.type)
def visit_enumeration(self):
if enums:
print ' switch(%s) {' % ("(*cmd)" + self._instance,)
for name, value in self.decl.values:
print ' case %s:' % (name,)
print ' _debug_printf("\\t\\t%s = %s\\n");' % (self._instance, name)
print ' break;'
print ' default:'
print ' _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
print ' break;'
print ' }'
else:
print ' _debug_printf("\\t\\t%s = %%i\\n", %s);' % (self._instance, "(*cmd)" + self._instance)
def dump_decl(instance, decl):
dumper = decl_dumper_t(instance, decl)
algorithm.apply_visitor(dumper, decl)
class type_dumper_t(type_visitor.type_visitor_t):
def __init__(self, instance, type_):
type_visitor.type_visitor_t.__init__(self)
self.instance = instance
self.type = type_
def clone(self):
return type_dumper_t(self.instance, self.type)
def visit_char(self):
self.print_instance('%i')
def visit_unsigned_char(self):
self.print_instance('%u')
def visit_signed_char(self):
self.print_instance('%i')
def visit_wchar(self):
self.print_instance('%i')
def visit_short_int(self):
self.print_instance('%i')
def visit_short_unsigned_int(self):
self.print_instance('%u')
def visit_bool(self):
self.print_instance('%i')
def visit_int(self):
self.print_instance('%i')
def visit_unsigned_int(self):
self.print_instance('%u')
def visit_long_int(self):
self.print_instance('%li')
def visit_long_unsigned_int(self):
self.print_instance('%lu')
def visit_long_long_int(self):
self.print_instance('%lli')
def visit_long_long_unsigned_int(self):
self.print_instance('%llu')
def visit_float(self):
self.print_instance('%f')
def visit_double(self):
self.print_instance('%f')
def visit_array(self):
for i in range(type_traits.array_size(self.type)):
dump_type(self.instance + '[%i]' % i, type_traits.base_type(self.type))
def visit_pointer(self):
self.print_instance('%p')
def visit_declarated(self):
#print 'decl = %r' % self.type.decl_string
decl = type_traits.remove_declarated(self.type)
dump_decl(self.instance, decl)
def print_instance(self, format):
print ' _debug_printf("\\t\\t%s = %s\\n", %s);' % (self.instance, format, "(*cmd)" + self.instance)
def dump_type(instance, type_):
type_ = type_traits.remove_alias(type_)
visitor = type_dumper_t(instance, type_)
algorithm.apply_visitor(visitor, type_)
def dump_struct(decls, class_):
print 'static void'
print 'dump_%s(const %s *cmd)' % (class_.name, class_.name)
print '{'
dump_decl('', class_)
print '}'
print ''
cmds = [
('SVGA_3D_CMD_SURFACE_DEFINE', 'SVGA3dCmdDefineSurface', (), 'SVGA3dSize'),
('SVGA_3D_CMD_SURFACE_DESTROY', 'SVGA3dCmdDestroySurface', (), None),
('SVGA_3D_CMD_SURFACE_COPY', 'SVGA3dCmdSurfaceCopy', (), 'SVGA3dCopyBox'),
('SVGA_3D_CMD_SURFACE_STRETCHBLT', 'SVGA3dCmdSurfaceStretchBlt', (), None),
('SVGA_3D_CMD_SURFACE_DMA', 'SVGA3dCmdSurfaceDMA', (), 'SVGA3dCopyBox'),
('SVGA_3D_CMD_CONTEXT_DEFINE', 'SVGA3dCmdDefineContext', (), None),
('SVGA_3D_CMD_CONTEXT_DESTROY', 'SVGA3dCmdDestroyContext', (), None),
('SVGA_3D_CMD_SETTRANSFORM', 'SVGA3dCmdSetTransform', (), None),
('SVGA_3D_CMD_SETZRANGE', 'SVGA3dCmdSetZRange', (), None),
('SVGA_3D_CMD_SETRENDERSTATE', 'SVGA3dCmdSetRenderState', (), 'SVGA3dRenderState'),
('SVGA_3D_CMD_SETRENDERTARGET', 'SVGA3dCmdSetRenderTarget', (), None),
('SVGA_3D_CMD_SETTEXTURESTATE', 'SVGA3dCmdSetTextureState', (), 'SVGA3dTextureState'),
('SVGA_3D_CMD_SETMATERIAL', 'SVGA3dCmdSetMaterial', (), None),
('SVGA_3D_CMD_SETLIGHTDATA', 'SVGA3dCmdSetLightData', (), None),
('SVGA_3D_CMD_SETLIGHTENABLED', 'SVGA3dCmdSetLightEnabled', (), None),
('SVGA_3D_CMD_SETVIEWPORT', 'SVGA3dCmdSetViewport', (), None),
('SVGA_3D_CMD_SETCLIPPLANE', 'SVGA3dCmdSetClipPlane', (), None),
('SVGA_3D_CMD_CLEAR', 'SVGA3dCmdClear', (), 'SVGA3dRect'),
('SVGA_3D_CMD_PRESENT', 'SVGA3dCmdPresent', (), 'SVGA3dCopyRect'),
('SVGA_3D_CMD_SHADER_DEFINE', 'SVGA3dCmdDefineShader', (), None),
('SVGA_3D_CMD_SHADER_DESTROY', 'SVGA3dCmdDestroyShader', (), None),
('SVGA_3D_CMD_SET_SHADER', 'SVGA3dCmdSetShader', (), None),
('SVGA_3D_CMD_SET_SHADER_CONST', 'SVGA3dCmdSetShaderConst', (), None),
('SVGA_3D_CMD_DRAW_PRIMITIVES', 'SVGA3dCmdDrawPrimitives', (('SVGA3dVertexDecl', 'numVertexDecls'), ('SVGA3dPrimitiveRange', 'numRanges')), 'SVGA3dVertexDivisor'),
('SVGA_3D_CMD_SETSCISSORRECT', 'SVGA3dCmdSetScissorRect', (), None),
('SVGA_3D_CMD_BEGIN_QUERY', 'SVGA3dCmdBeginQuery', (), None),
('SVGA_3D_CMD_END_QUERY', 'SVGA3dCmdEndQuery', (), None),
('SVGA_3D_CMD_WAIT_FOR_QUERY', 'SVGA3dCmdWaitForQuery', (), None),
#('SVGA_3D_CMD_PRESENT_READBACK', None, (), None),
('SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN', 'SVGA3dCmdBlitSurfaceToScreen', (), 'SVGASignedRect'),
]
def dump_cmds():
print r'''
void
svga_dump_command(uint32_t cmd_id, const void *data, uint32_t size)
{
const uint8_t *body = (const uint8_t *)data;
const uint8_t *next = body + size;
'''
print ' switch(cmd_id) {'
indexes = 'ijklmn'
for id, header, body, footer in cmds:
print ' case %s:' % id
print ' _debug_printf("\\t%s\\n");' % id
print ' {'
print ' const %s *cmd = (const %s *)body;' % (header, header)
if len(body):
print ' unsigned ' + ', '.join(indexes[:len(body)]) + ';'
print ' dump_%s(cmd);' % header
print ' body = (const uint8_t *)&cmd[1];'
for i in range(len(body)):
struct, count = body[i]
idx = indexes[i]
print ' for(%s = 0; %s < cmd->%s; ++%s) {' % (idx, idx, count, idx)
print ' dump_%s((const %s *)body);' % (struct, struct)
print ' body += sizeof(%s);' % struct
print ' }'
if footer is not None:
print ' while(body + sizeof(%s) <= next) {' % footer
print ' dump_%s((const %s *)body);' % (footer, footer)
print ' body += sizeof(%s);' % footer
print ' }'
if id == 'SVGA_3D_CMD_SHADER_DEFINE':
print ' svga_shader_dump((const uint32_t *)body,'
print ' (unsigned)(next - body)/sizeof(uint32_t),'
print ' FALSE);'
print ' body = next;'
print ' }'
print ' break;'
print ' default:'
print ' _debug_printf("\\t0x%08x\\n", cmd_id);'
print ' break;'
print ' }'
print r'''
while(body + sizeof(uint32_t) <= next) {
_debug_printf("\t\t0x%08x\n", *(const uint32_t *)body);
body += sizeof(uint32_t);
}
while(body + sizeof(uint32_t) <= next)
_debug_printf("\t\t0x%02x\n", *body++);
}
'''
print r'''
void
svga_dump_commands(const void *commands, uint32_t size)
{
const uint8_t *next = commands;
const uint8_t *last = next + size;
assert(size % sizeof(uint32_t) == 0);
while(next < last) {
const uint32_t cmd_id = *(const uint32_t *)next;
if(SVGA_3D_CMD_BASE <= cmd_id && cmd_id < SVGA_3D_CMD_MAX) {
const SVGA3dCmdHeader *header = (const SVGA3dCmdHeader *)next;
const uint8_t *body = (const uint8_t *)&header[1];
next = body + header->size;
if(next > last)
break;
svga_dump_command(cmd_id, body, header->size);
}
else if(cmd_id == SVGA_CMD_FENCE) {
_debug_printf("\tSVGA_CMD_FENCE\n");
_debug_printf("\t\t0x%08x\n", ((const uint32_t *)next)[1]);
next += 2*sizeof(uint32_t);
}
else {
_debug_printf("\t0x%08x\n", cmd_id);
next += sizeof(uint32_t);
}
}
}
'''
def main():
print copyright.strip()
print
print '/**'
print ' * @file'
print ' * Dump SVGA commands.'
print ' *'
print ' * Generated automatically from svga3d_reg.h by svga_dump.py.'
print ' */'
print
print '#include "svga_types.h"'
print '#include "svga_shader_dump.h"'
print '#include "svga3d_reg.h"'
print
print '#include "util/u_debug.h"'
print '#include "svga_dump.h"'
print
config = parser.config_t(
include_paths = ['../../../include', '../include'],
compiler = 'gcc',
)
headers = [
'svga_types.h',
'svga3d_reg.h',
]
decls = parser.parse(headers, config, parser.COMPILATION_MODE.ALL_AT_ONCE)
global_ns = declarations.get_global_namespace(decls)
names = set()
for id, header, body, footer in cmds:
names.add(header)
for struct, count in body:
names.add(struct)
if footer is not None:
names.add(footer)
for class_ in global_ns.classes(lambda decl: decl.name in names):
dump_struct(decls, class_)
dump_cmds()
if __name__ == '__main__':
main()
| apache-2.0 |
darktears/chromium-crosswalk | tools/cr/cr/commands/install.py | 113 | 1206 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the install command."""
import cr
class InstallCommand(cr.Command):
"""The implementation of the install command.
This first uses Builder.Build to bring the target up to date, and then
installs it using Installer.Reinstall.
The builder registers its own command line arguments, and you can use those to
select which builder is used. Selecting the skip builder
(using --builder=skip) bypasses the build stage.
"""
def __init__(self):
super(InstallCommand, self).__init__()
self.help = 'Install a binary'
def AddArguments(self, subparsers):
parser = super(InstallCommand, self).AddArguments(subparsers)
cr.Builder.AddArguments(self, parser)
cr.Installer.AddArguments(self, parser)
cr.Target.AddArguments(self, parser, allow_multiple=True)
self.ConsumeArgs(parser, 'the installer')
return parser
def Run(self):
targets = cr.Target.GetTargets()
if not cr.Installer.Skipping():
cr.Builder.Build(targets, [])
cr.Installer.Reinstall(targets, cr.context.remains)
| bsd-3-clause |
steventimberman/masterDebater | venv/lib/python2.7/site-packages/pip/wheel.py | 338 | 32010 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224; we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and
# thus, strictly speaking, less secure). However, the difference doesn't
# matter much for our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = canonicalize_name(package_name)
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path))
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
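# --- Illustrative sketch (not part of the original file) ---
# RECORD digests are the urlsafe base64 of the raw sha256 bytes, with
# '=' padding stripped; e.g. for an empty payload:
import hashlib as _hl
from base64 import urlsafe_b64encode as _b64
_digest = 'sha256=' + _b64(_hl.sha256(b'').digest()).decode('latin1').rstrip('=')
# _digest == 'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU'
# --- end sketch ---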
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.optionxform = lambda option: option
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
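# --- Illustrative sketch (not part of the original file) ---
# entry_points.txt is INI-shaped.  A standalone parse of a sample body
# using the Python 3 stdlib configparser (pip itself uses the vendored
# six.moves version plus readfp() for 2/3 compatibility); the script
# name and target below are made up:
from configparser import RawConfigParser as _RCP
_cp = _RCP()
_cp.optionxform = lambda option: option   # keep script names case-sensitive
_cp.read_string(u"[console_scripts]\nmytool = mypkg.cli:main\n")
# dict(_cp.items('console_scripts')) == {'mytool': 'mypkg.cli:main'}
# --- end sketch ---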
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False, prefix=None):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated,
prefix=prefix,
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return os.path.relpath(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
canonicalize_name(s).startswith(
canonicalize_name(req.name))):
assert not info_dir, ('Multiple .dist-info directories: ' +
destsubdir + ', ' +
', '.join(info_dir))
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
if entry.suffix is None:
raise InstallationError(
"Invalid script entry point: %s for req: %s - A callable "
"suffix is required. Cf https://packaging.python.org/en/"
"latest/distributing.html#console-scripts for more "
"information." % (entry, req)
)
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# Adding to the level of hack in this section of code: in order to support
# ensurepip, this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable, which controls which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
# Record pip as the installer
installer = os.path.join(info_dir[0], 'INSTALLER')
temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((normpath(f, lib_dir), h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
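# Illustrative sketch only (not part of pip's source): ``_unique`` deduplicates
# whatever the wrapped generator yields while preserving order, e.g.:
#
#     @_unique
#     def letters():
#         for ch in "abacab":
#             yield ch
#
#     list(letters())  # -> ['a', 'b', 'c']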
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
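# Hedged illustration (the paths are made up): for a RECORD row naming
# ``pkg/mod.py`` the generator yields both the .py file and its legacy
# sibling .pyc, e.g.:
#
#     <site-packages>/pkg/mod.py
#     <site-packages>/pkg/mod.pyc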
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
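# Hedged usage sketch (directory and metadata values are illustrative): for an
# extracted wheel whose WHEEL file contains ``Wheel-Version: 1.0`` this
# returns the tuple (1, 0); any lookup or parse failure returns False:
#
#     wheel_version('/tmp/unpacked-wheel')  # -> (1, 0), or False on failure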
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
    Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
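# A minimal sketch of the two branches above (assuming, as elsewhere in this
# module, that VERSION_COMPATIBLE == (1, 0); the names are illustrative):
#
#     check_compatibility((1, 9), 'example')  # logs a warning, then proceeds
#     check_compatibility((2, 0), 'example')  # raises UnsupportedWheel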
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
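# Hedged usage sketch (the filename is illustrative): parsing a PEP 427 wheel
# filename into its PEP 425 tag components:
#
#     w = Wheel('pip-8.1.2-py2.py3-none-any.whl')
#     w.name       # 'pip'
#     w.version    # '8.1.2'
#     w.file_tags  # {('py2', 'none', 'any'), ('py3', 'none', 'any')}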
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir, python_tag=None):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd, python_tag=python_tag):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
pass
# Ignore return, we can't do anything else useful.
self._clean_one(req)
return None
finally:
rmtree(tempd)
def _base_setup_args(self, req):
return [
sys.executable, "-u", '-c',
SETUPTOOLS_SHIM % req.setup_py
] + list(self.global_options)
def __build_one(self, req, tempd, python_tag=None):
base_args = self._base_setup_args(req)
spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
if python_tag is not None:
wheel_args += ["--python-tag", python_tag]
try:
call_subprocess(wheel_args, cwd=req.setup_py_dir,
show_stdout=False, spinner=spinner)
return True
except:
spinner.finish("error")
logger.error('Failed building wheel for %s', req.name)
return False
def _clean_one(self, req):
base_args = self._base_setup_args(req)
logger.info('Running setup.py clean for %s', req.name)
clean_args = base_args + ['clean', '--all']
try:
call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed cleaning build dir for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with
            the newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
        # unpack sdists and construct the req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif autobuilding and req.editable:
pass
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
canonicalize_name(req.name)):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
python_tag = None
if autobuilding:
python_tag = pep425tags.implementation_tag
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warning("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(
req, output_dir,
python_tag=python_tag,
)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file))
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| mit |
bhilburn/gnuradio | gnuradio-runtime/python/gnuradio/gr_unittest.py | 58 | 6303 | #!/usr/bin/env python
#
# Copyright 2004,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
GNU radio specific extension of unittest.
"""
import unittest
import gr_xmlrunner
import sys, os, stat
class TestCase(unittest.TestCase):
"""A subclass of unittest.TestCase that adds additional assertions
Adds new methods assertComplexAlmostEqual,
assertComplexTuplesAlmostEqual and assertFloatTuplesAlmostEqual
"""
def assertComplexAlmostEqual (self, first, second, places=7, msg=None):
"""Fail if the two complex objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero.
Note that decimal places (from zero) is usually not the same
        as significant digits (measured from the most significant digit).
"""
if round(second.real-first.real, places) != 0:
raise self.failureException, \
(msg or '%s != %s within %s places' % (`first`, `second`, `places` ))
if round(second.imag-first.imag, places) != 0:
raise self.failureException, \
(msg or '%s != %s within %s places' % (`first`, `second`, `places` ))
def assertComplexAlmostEqual2 (self, ref, x, abs_eps=1e-12, rel_eps=1e-6, msg=None):
"""
Fail if the two complex objects are unequal as determined by...
"""
if abs(ref - x) < abs_eps:
return
if abs(ref) > abs_eps:
if abs(ref-x)/abs(ref) > rel_eps:
raise self.failureException, \
(msg or '%s != %s rel_error = %s rel_limit = %s' % (
`ref`, `x`, abs(ref-x)/abs(ref), `rel_eps` ))
else:
raise self.failureException, \
(msg or '%s != %s rel_error = %s rel_limit = %s' % (
`ref`, `x`, abs(ref-x)/abs(ref), `rel_eps` ))
def assertComplexTuplesAlmostEqual (self, a, b, places=7, msg=None):
self.assertEqual (len(a), len(b))
for i in xrange (len(a)):
self.assertComplexAlmostEqual (a[i], b[i], places, msg)
def assertComplexTuplesAlmostEqual2 (self, ref, x,
abs_eps=1e-12, rel_eps=1e-6, msg=None):
self.assertEqual (len(ref), len(x))
for i in xrange (len(ref)):
try:
self.assertComplexAlmostEqual2 (ref[i], x[i], abs_eps, rel_eps, msg)
except self.failureException, e:
#sys.stderr.write("index = %d " % (i,))
#sys.stderr.write("%s\n" % (e,))
raise
def assertFloatTuplesAlmostEqual (self, a, b, places=7, msg=None):
self.assertEqual (len(a), len(b))
for i in xrange (len(a)):
self.assertAlmostEqual (a[i], b[i], places, msg)
def assertFloatTuplesAlmostEqual2 (self, ref, x,
abs_eps=1e-12, rel_eps=1e-6, msg=None):
self.assertEqual (len(ref), len(x))
for i in xrange (len(ref)):
try:
self.assertComplexAlmostEqual2 (ref[i], x[i], abs_eps, rel_eps, msg)
except self.failureException, e:
#sys.stderr.write("index = %d " % (i,))
#sys.stderr.write("%s\n" % (e,))
raise
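# Hedged usage sketch (values are illustrative): the tuple assertions compare
# element-wise, so a typical QA check looks like
#
#     class qa_my_block (TestCase):
#         def test_001 (self):
#             expected = (0.0, 1.0, 2.0)
#             result = (0.0, 1.0 + 1e-9, 2.0)
#             self.assertFloatTuplesAlmostEqual (expected, result, places=6)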
TestResult = unittest.TestResult
TestSuite = unittest.TestSuite
FunctionTestCase = unittest.FunctionTestCase
TestLoader = unittest.TestLoader
TextTestRunner = unittest.TextTestRunner
TestProgram = unittest.TestProgram
main = TestProgram
def run(PUT, filename=None):
'''
Runs the unittest on a TestCase and produces an optional XML report
    PUT: the program under test; should be a gr_unittest.TestCase
filename: an optional filename to save the XML report of the tests
this will live in ./.unittests/python
'''
    # Run this if given a file name
if(filename is not None):
basepath = "./.unittests"
path = basepath + "/python"
if not os.path.exists(basepath):
os.makedirs(basepath, 0750)
xmlrunner = None
# only proceed if .unittests is writable
st = os.stat(basepath)[stat.ST_MODE]
if(st & stat.S_IWUSR > 0):
# Test if path exists; if not, build it
if not os.path.exists(path):
os.makedirs(path, 0750)
# Just for safety: make sure we can write here, too
st = os.stat(path)[stat.ST_MODE]
if(st & stat.S_IWUSR > 0):
# Create an XML runner to filename
fout = file(path+"/"+filename, "w")
xmlrunner = gr_xmlrunner.XMLTestRunner(fout)
txtrunner = TextTestRunner(verbosity=1)
# Run the test; runner also creates XML output file
# FIXME: make xmlrunner output to screen so we don't have to do run and main
suite = TestLoader().loadTestsFromTestCase(PUT)
        # use the xmlrunner if we can write to the directory
if(xmlrunner is not None):
xmlrunner.run(suite)
main()
            # This will run, and will fail ``make check`` if there is a
            # problem, but it does not output to screen.
#main(testRunner = xmlrunner)
else:
# If no filename is given, just run the test
main()
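# Hedged usage sketch (module and file names are illustrative): a QA script
# normally hands its TestCase and an XML report name to ``run``:
#
#     if __name__ == '__main__':
#         gr_unittest.run(qa_my_block, "qa_my_block.xml")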
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
| gpl-3.0 |
bbaumer/ansible-modules-core | cloud/openstack/os_auth.py | 131 | 2039 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_auth
short_description: Retrieve an auth token
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Retrieve an auth token from an OpenStack Cloud
requirements:
- "python >= 2.6"
- "shade"
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Authenticate to the cloud and retrieve the service catalog
- os_auth:
cloud: rax-dfw
- debug: var=service_catalog
'''
def main():
argument_spec = openstack_full_argument_spec()
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
module.exit_json(
changed=False,
ansible_facts=dict(
auth_token=cloud.auth_token,
service_catalog=cloud.service_catalog))
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
dmart914/CTCI | 02-linked-lists/2-2-kth-last.py | 1 | 2647 | # 2-2 Implement an algorithm to find the kth to last element of a
# a singly linked list
# Pseudo:
# Iterate through all to find the length
# Iterate again to get the kth last
class Node:
'''A node in a singly linked list'''
def __init__(self, data):
self.data = data
self.next = None
def __str__(self):
return (str(self.data))
class LinkedList:
'''A linked list comprised of Nodes'''
def __init__(self, start=None):
self.start = start
self.current = start
self.last = start
def __str__(self):
returnString = 'LINKED LIST\n'
returnString += 'Current node data: '
returnString += str(self.current)
return returnString
def next(self):
if self.current == None:
return False
elif self.current.next == None:
return False
else:
self.current = self.current.next
return self.current
def reset(self):
self.current = self.start
# Pushes to the END; sets the current pointer to the
    # node pushed, or the last node
def push(self, n):
if self.start == None:
print('start')
self.start = n
return
if (self.current == None):
print('resetting')
self.reset()
while (self.current.next != None):
self.next()
self.current.next = n
self.next()
def printAll(self):
self.reset()
print('LINKED LIST')
currentNodeIndex = 1
while self.next() != False:
print(str(currentNodeIndex) + ' ' + str(self.current))
currentNodeIndex += 1
# self.next()
self.reset()
def findKLast(self, n):
if (self.start == None):
return False
listLength = 0
self.reset()
for i in range(0, n):
listLength += 1
if (self.next() == False):
return False
nodeToReturn = self.start
while(self.next() != False):
listLength += 1
nodeToReturn = nodeToReturn.next
nodeIndexToReturn = listLength - int(n)
return [nodeIndexToReturn, str(nodeToReturn)]
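# A hedged alternative sketch (not part of the exercise solution above): the
# classic single-pass "runner" technique keeps two pointers k nodes apart, so
# the list is traversed only once. It operates directly on Node objects.
def find_k_last_runner(start, k):
    '''Return the kth-to-last Node (k=1 -> last node), or None if too short.'''
    lead = trail = start
    for _ in range(k):
        if lead is None:
            # fewer than k nodes in the list
            return None
        lead = lead.next
    while lead is not None:
        lead = lead.next
        trail = trail.next
    return trail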
myLinkedList = LinkedList(Node('Dave'))
print(myLinkedList)
myLinkedList.next()
myLinkedList.next()
myLinkedList.next()
print(myLinkedList)
myLinkedList.reset()
print(myLinkedList)
myLinkedList.push(Node('Nora'))
myLinkedList.push(Node('Luca'))
myLinkedList.push(Node('Riley'))
myLinkedList.push(Node('lol'))
myLinkedList.push(Node('k'))
myLinkedList.printAll()
print(myLinkedList.findKLast(1))
| apache-2.0 |
couchapp/couchapp | tests/test_cli.py | 2 | 9635 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <benoitc@e-engura.org>
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
import os
import tempfile
import shutil
import sys
import unittest2 as unittest
from testconfig import config
from couchapp.errors import ResourceNotFound
from couchapp.client import Database
from couchapp.util import deltree, sh_open
couchapp_dir = os.path.join(os.path.dirname(__file__), '../')
couchapp_cli = os.path.join(os.path.dirname(__file__), '../bin/couchapp')
try:
url = config['host']['url']
except KeyError:
url = 'http://127.0.0.1:5984/'
def _tempdir():
f, fname = tempfile.mkstemp()
os.close(f)
os.unlink(fname)
return fname
class CliTestCase(unittest.TestCase):
def setUp(self):
self.db = Database(url + 'couchapp-test', create=True)
self.tempdir = _tempdir()
os.makedirs(self.tempdir)
self.app_dir = os.path.join(self.tempdir, "my-app")
self.startdir = os.getcwd()
def tearDown(self):
self.db.delete()
deltree(self.tempdir)
os.chdir(self.startdir)
@property
def cmd(self):
return 'cd {0} && couchapp'.format(self.tempdir)
def _make_testapp(self):
testapp_path = os.path.join(os.path.dirname(__file__), 'testapp')
shutil.copytree(testapp_path, self.app_dir)
def _retrieve_ddoc(self):
# any design doc created ?
design_doc = None
try:
design_doc = self.db.open_doc('_design/my-app')
except ResourceNotFound:
pass
self.assertIsNotNone(design_doc)
return design_doc
def test_init_template(self):
os.chdir(self.tempdir)
child_stdout, child_stderr = sh_open(
'{0} init -t default my-app'.format(self.cmd))
appdir = os.path.join(self.tempdir, 'my-app')
self.assertTrue(os.path.isdir(appdir))
cfile = os.path.join(appdir, '.couchapprc')
self.assertTrue(os.path.isfile(cfile))
self.assertTrue(os.path.isdir(os.path.join(appdir, '_attachments')))
self.assertTrue(os.path.isfile(os.path.join(appdir, '_attachments',
'index.html')))
self.assertTrue(os.path.isfile(os.path.join(self.app_dir,
'_attachments',
'style', 'main.css')))
self.assertTrue(os.path.isdir(os.path.join(appdir, 'views')))
self.assertTrue(os.path.isdir(os.path.join(appdir, 'shows')))
self.assertTrue(os.path.isdir(os.path.join(appdir, 'lists')))
def testPush(self):
self._make_testapp()
child_stdout, child_stderr = sh_open(
'{0} push -v my-app {1}couchapp-test'.format(self.cmd, url))
design_doc = self._retrieve_ddoc()
# should create view
self.assertIn('function', design_doc['views']['example']['map'])
# should use macros
self.assertIn('stddev', design_doc['views']['example']['map'])
self.assertIn('ejohn.org', design_doc['shows']['example-show'])
self.assertIn('included by foo.js',
design_doc['shows']['example-show'])
# should create index
content_type = design_doc['_attachments']['index.html']['content_type']
self.assertEqual(content_type, 'text/html')
# should create manifest
self.assertIn('foo/', design_doc['couchapp']['manifest'])
# should push and macro the doc shows
self.assertIn('Generated CouchApp Form Template',
design_doc['shows']['example-show'])
# should push and macro the view lists
self.assertIn('Test XML Feed', design_doc['lists']['feed'])
# should allow deeper includes
self.assertNotIn('"helpers"', design_doc['shows']['example-show'])
# deep require macros
self.assertNotIn('"template"', design_doc['shows']['example-show'])
self.assertIn('Resig', design_doc['shows']['example-show'])
def testPushNoAtomic(self):
self._make_testapp()
child_stdout, child_stderr = sh_open(
'{0} push --no-atomic my-app {1}couchapp-test'.format(self.cmd, url))
design_doc = self._retrieve_ddoc()
# there are 3 revisions (1 doc creation + 2 attachments)
self.assertTrue(design_doc['_rev'].startswith('3-'))
# should create view
self.assertIn('function', design_doc['views']['example']['map'])
# should use macros
self.assertIn('stddev', design_doc['views']['example']['map'])
self.assertIn('ejohn.org', design_doc['shows']['example-show'])
# should create index
content_type = design_doc['_attachments']['index.html']['content_type']
self.assertEqual(content_type, 'text/html')
# should create manifest
self.assertIn('foo/', design_doc['couchapp']['manifest'])
# should push and macro the doc shows
self.assertIn('Generated CouchApp Form Template',
design_doc['shows']['example-show'])
# should push and macro the view lists
self.assertIn('Test XML Feed', design_doc['lists']['feed'])
# should allow deeper includes
self.assertNotIn('"helpers"', design_doc['shows']['example-show'])
# deep require macros
self.assertNotIn('"template"', design_doc['shows']['example-show'])
self.assertIn('Resig', design_doc['shows']['example-show'])
def testClone(self):
self._make_testapp()
child_stdout, child_stderr = sh_open(
'{0} push -v my-app {1}couchapp-test'.format(self.cmd, url))
design_doc = self._retrieve_ddoc()
app_dir = os.path.join(self.tempdir, "couchapp-test")
child_stdout, child_stderr = sh_open('{0} clone {1} {2}'.format(
self.cmd, url + 'couchapp-test/_design/my-app', app_dir))
# should create .couchapprc
self.assertTrue(os.path.isfile(os.path.join(app_dir, ".couchapprc")))
# should clone the views
self.assertTrue(os.path.isdir(os.path.join(app_dir, "views")))
# should create foo/bar.txt file
self.assertTrue(os.path.isfile(os.path.join(app_dir, 'foo/bar.txt')))
# should create lib/helpers/math.js file
self.assertTrue(os.path.isfile(os.path.join(app_dir,
'lib/helpers/math.js')))
# should work when design doc is edited manually
design_doc['test.txt'] = "essai"
design_doc = self.db.save_doc(design_doc)
deltree(app_dir)
child_stdout, child_stderr = sh_open('{0} clone {1} {2}'.format(
self.cmd, url + 'couchapp-test/_design/my-app', app_dir))
self.assertTrue(os.path.isfile(os.path.join(app_dir, 'test.txt')))
# should work when a view is added manually
design_doc["views"]["more"] = {"map":
"function(doc) { emit(null, doc); }"}
design_doc = self.db.save_doc(design_doc)
deltree(app_dir)
child_stdout, child_stderr = sh_open('{0} clone {1} {2}'.format(
self.cmd, url + 'couchapp-test/_design/my-app', app_dir))
self.assertTrue(os.path.isfile(os.path.join(app_dir,
'views/example/map.js')))
# should work without manifest
del design_doc['couchapp']['manifest']
design_doc = self.db.save_doc(design_doc)
deltree(app_dir)
child_stdout, child_stderr = sh_open('{0} clone {1} {2}'.format(
self.cmd, url + 'couchapp-test/_design/my-app', app_dir))
self.assertTrue(os.path.isfile(os.path.join(app_dir,
'views/example/map.js')))
# should create foo/bar without manifest
self.assertTrue(os.path.isfile(os.path.join(app_dir, 'foo/bar')))
# should create lib/helpers.json without manifest
self.assertTrue(os.path.isfile(os.path.join(app_dir,
'lib/helpers.json')))
def testPushApps(self):
os.chdir(self.tempdir)
docsdir = os.path.join(self.tempdir, 'docs')
os.makedirs(docsdir)
# create 2 apps
child_stdout, child_stderr = sh_open(
'{0} init -t default docs/app1'.format(self.cmd))
child_stdout, child_stderr = sh_open(
'{0} init -t default docs/app2'.format(self.cmd))
child_stdout, child_stderr = sh_open(
'{0} pushapps docs/ {1}couchapp-test'.format(self.cmd, url))
alldocs = self.db.all_docs()['rows']
self.assertEqual(len(alldocs), 2)
self.assertEqual('_design/app1', alldocs[0]['id'])
def testPushDocs(self):
os.chdir(self.tempdir)
docsdir = os.path.join(self.tempdir, 'docs')
os.makedirs(docsdir)
# create 2 apps
child_stdout, child_stderr = sh_open(
'{0} init -t default docs/app1'.format(self.cmd))
child_stdout, child_stderr = sh_open(
'{0} init -t default docs/app2'.format(self.cmd))
child_stdout, child_stderr = sh_open(
'{0} pushdocs docs/ {1}couchapp-test'.format(self.cmd, url))
alldocs = self.db.all_docs()['rows']
self.assertEqual(len(alldocs), 2)
self.assertEqual('_design/app1', alldocs[0]['id'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kjc88/sl4a | python/src/Lib/test/test_codecencodings_jp.py | 149 | 3938 | #!/usr/bin/env python
#
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class Test_CP932(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp932'
tstring = test_multibytecodec_support.load_teststring('shift_jis')
codectests = (
# invalid bytes
("abc\x81\x00\x81\x00\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x81\x00\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x81\x00\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x81\x00\x82\x84", "ignore", u"abc\uff44"),
# sjis vs cp932
("\\\x7e", "replace", u"\\\x7e"),
("\x81\x5f\x81\x61\x81\x7c", "replace", u"\uff3c\u2225\uff0d"),
)
class Test_EUC_JISX0213(test_multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jisx0213'
tstring = test_multibytecodec_support.load_teststring('euc_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
("abc\xc8", "strict", None),
("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
("\xc1\x64", "strict", None),
("\xa1\xc0", "strict", u"\uff3c"),
)
xmlcharnametest = (
u"\xab\u211c\xbb = \u2329\u1234\u232a",
"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
eucjp_commontests = (
("abc\x80\x80\xc1\xc4", "strict", None),
("abc\xc8", "strict", None),
("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
("\xc1\x64", "strict", None),
)
class Test_EUC_JP_COMPAT(test_multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jp'
tstring = test_multibytecodec_support.load_teststring('euc_jp')
codectests = eucjp_commontests + (
("\xa1\xc0\\", "strict", u"\uff3c\\"),
(u"\xa5", "strict", "\x5c"),
(u"\u203e", "strict", "\x7e"),
)
shiftjis_commonenctests = (
("abc\x80\x80\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
)
class Test_SJIS_COMPAT(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis'
tstring = test_multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
("\\\x7e", "strict", u"\\\x7e"),
("\x81\x5f\x81\x61\x81\x7c", "strict", u"\uff3c\u2016\u2212"),
)
class Test_SJISX0213(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jisx0213'
tstring = test_multibytecodec_support.load_teststring('shift_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
# sjis vs cp932
("\\\x7e", "replace", u"\xa5\u203e"),
("\x81\x5f\x81\x61\x81\x7c", "replace", u"\x5c\u2016\u2212"),
)
xmlcharnametest = (
u"\xab\u211c\xbb = \u2329\u1234\u232a",
"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.5-py2.5.egg/sqlalchemy/orm/uowdumper.py | 4 | 7115 | # orm/uowdumper.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Dumps out a string representation of a UOWTask structure"""
from sqlalchemy.orm import unitofwork
from sqlalchemy.orm import util as mapperutil
from sqlalchemy import util
class UOWDumper(unitofwork.UOWExecutor):
def __init__(self, tasks, buf, verbose=False):
self.verbose = verbose
self.indent = 0
self.tasks = tasks
self.buf = buf
self.headers = {}
self.execute(None, tasks)
def execute(self, trans, tasks, isdelete=None):
if isdelete is not True:
for task in tasks:
self._execute(trans, task, False)
if isdelete is not False:
for task in util.reversed(tasks):
self._execute(trans, task, True)
def _execute(self, trans, task, isdelete):
try:
i = self._indent()
if i:
i = i[:-1] + "+-"
self.buf.write(i + " " + self._repr_task(task))
self.buf.write(" (" + (isdelete and "delete " or "save/update ") + "phase) \n")
self.indent += 1
super(UOWDumper, self).execute(trans, [task], isdelete)
finally:
self.indent -= 1
def save_objects(self, trans, task):
# sort elements to be inserted by insert order
def comparator(a, b):
if a.state is None:
x = None
elif not hasattr(a.state, 'insert_order'):
x = None
else:
x = a.state.insert_order
if b.state is None:
y = None
elif not hasattr(b.state, 'insert_order'):
y = None
else:
y = b.state.insert_order
return cmp(x, y)
l = list(task.polymorphic_tosave_elements)
l.sort(comparator)
for rec in l:
if rec.listonly:
continue
self.header("Save elements"+ self._inheritance_tag(task))
self.buf.write(self._indent()[:-1] + "+-" + self._repr_task_element(rec) + "\n")
self.closeheader()
def delete_objects(self, trans, task):
for rec in task.polymorphic_todelete_elements:
if rec.listonly:
continue
self.header("Delete elements"+ self._inheritance_tag(task))
self.buf.write(self._indent() + "- " + self._repr_task_element(rec) + "\n")
self.closeheader()
def _inheritance_tag(self, task):
if not self.verbose:
return ""
else:
return (" (inheriting task %s)" % self._repr_task(task))
def header(self, text):
"""Write a given header just once."""
if not self.verbose:
return
try:
self.headers[text]
except KeyError:
self.buf.write(self._indent() + "- " + text + "\n")
self.headers[text] = True
def closeheader(self):
if not self.verbose:
return
self.buf.write(self._indent() + "- ------\n")
def execute_dependency(self, transaction, dep, isdelete):
self._dump_processor(dep, isdelete)
def execute_save_steps(self, trans, task):
super(UOWDumper, self).execute_save_steps(trans, task)
def execute_delete_steps(self, trans, task):
super(UOWDumper, self).execute_delete_steps(trans, task)
def execute_dependencies(self, trans, task, isdelete=None):
super(UOWDumper, self).execute_dependencies(trans, task, isdelete)
def execute_cyclical_dependencies(self, trans, task, isdelete):
self.header("Cyclical %s dependencies" % (isdelete and "delete" or "save"))
super(UOWDumper, self).execute_cyclical_dependencies(trans, task, isdelete)
self.closeheader()
def execute_per_element_childtasks(self, trans, task, isdelete):
super(UOWDumper, self).execute_per_element_childtasks(trans, task, isdelete)
def execute_element_childtasks(self, trans, element, isdelete):
self.header("%s subelements of UOWTaskElement(%s)" % ((isdelete and "Delete" or "Save"), hex(id(element))))
super(UOWDumper, self).execute_element_childtasks(trans, element, isdelete)
self.closeheader()
def _dump_processor(self, proc, deletes):
if deletes:
val = proc.targettask.polymorphic_todelete_elements
else:
val = proc.targettask.polymorphic_tosave_elements
if self.verbose:
self.buf.write(self._indent() + " +- %s attribute on %s (UOWDependencyProcessor(%d) processing %s)\n" % (
repr(proc.processor.key),
("%s's to be %s" % (self._repr_task_class(proc.targettask), deletes and "deleted" or "saved")),
hex(id(proc)),
self._repr_task(proc.targettask))
)
elif False:
self.buf.write(self._indent() + " +- %s attribute on %s\n" % (
repr(proc.processor.key),
("%s's to be %s" % (self._repr_task_class(proc.targettask), deletes and "deleted" or "saved")),
)
)
if len(val) == 0:
if self.verbose:
self.buf.write(self._indent() + " +- " + "(no objects)\n")
for v in val:
self.buf.write(self._indent() + " +- " + self._repr_task_element(v, proc.processor.key, process=True) + "\n")
def _repr_task_element(self, te, attribute=None, process=False):
if getattr(te, 'state', None) is None:
objid = "(placeholder)"
else:
if attribute is not None:
objid = "%s.%s" % (mapperutil.state_str(te.state), attribute)
else:
objid = mapperutil.state_str(te.state)
if self.verbose:
return "%s (UOWTaskElement(%s, %s))" % (objid, hex(id(te)), (te.listonly and 'listonly' or (te.isdelete and 'delete' or 'save')))
elif process:
return "Process %s" % (objid)
else:
return "%s %s" % ((te.isdelete and "Delete" or "Save"), objid)
def _repr_task(self, task):
if task.mapper is not None:
if task.mapper.__class__.__name__ == 'Mapper':
name = task.mapper.class_.__name__ + "/" + task.mapper.local_table.description + "/" + str(task.mapper.entity_name)
else:
name = repr(task.mapper)
else:
name = '(none)'
sd = getattr(task, '_superduper', False)
if sd:
return ("SD UOWTask(%s, %s)" % (hex(id(task)), name))
else:
return ("UOWTask(%s, %s)" % (hex(id(task)), name))
def _repr_task_class(self, task):
if task.mapper is not None and task.mapper.__class__.__name__ == 'Mapper':
return task.mapper.class_.__name__
else:
return '(none)'
def _indent(self):
return " |" * self.indent
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/pip/commands/__init__.py | 280 | 2007 | """
Package containing all pip commands
"""
from __future__ import absolute_import
from pip.commands.completion import CompletionCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.wheel import WheelCommand
commands_dict = {
CompletionCommand.name: CompletionCommand,
FreezeCommand.name: FreezeCommand,
HelpCommand.name: HelpCommand,
SearchCommand.name: SearchCommand,
ShowCommand.name: ShowCommand,
InstallCommand.name: InstallCommand,
UninstallCommand.name: UninstallCommand,
ListCommand.name: ListCommand,
WheelCommand.name: WheelCommand,
}
commands_order = [
InstallCommand,
UninstallCommand,
FreezeCommand,
ListCommand,
ShowCommand,
SearchCommand,
WheelCommand,
HelpCommand,
]
def get_summaries(ignore_hidden=True, ordered=True):
"""Yields sorted (command name, command summary) tuples."""
if ordered:
cmditems = _sort_commands(commands_dict, commands_order)
else:
cmditems = commands_dict.items()
for name, command_class in cmditems:
if ignore_hidden and command_class.hidden:
continue
yield (name, command_class.summary)
def get_similar_commands(name):
"""Command name auto-correct."""
from difflib import get_close_matches
name = name.lower()
close_commands = get_close_matches(name, commands_dict.keys())
if close_commands:
return close_commands[0]
else:
return False
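# Hedged illustration (inputs are made up): this backs pip's "did you mean"
# hint by fuzzy-matching against the registered command names:
#
#     get_similar_commands('instal')  # -> 'install'
#     get_similar_commands('xyzzy')   # -> False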
def _sort_commands(cmddict, order):
def keyfn(key):
try:
return order.index(key[1])
except ValueError:
# unordered items should come last
return 0xff
return sorted(cmddict.items(), key=keyfn)
| mit |
JavML/django | tests/gis_tests/geoapp/test_regress.py | 318 | 3857 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.gis.db.models import Extent
from django.contrib.gis.shortcuts import render_to_kmz
from django.db.models import Count, Min
from django.test import TestCase, skipUnlessDBFeature
from ..utils import no_oracle
from .models import City, PennsylvaniaCity, State, Truth
@skipUnlessDBFeature("gis_enabled")
class GeoRegressionTests(TestCase):
fixtures = ['initial']
def test_update(self):
"Testing GeoQuerySet.update(). See #10411."
pnt = City.objects.get(name='Pueblo').point
bak = pnt.clone()
pnt.y += 0.005
pnt.x += 0.005
City.objects.filter(name='Pueblo').update(point=pnt)
self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
City.objects.filter(name='Pueblo').update(point=bak)
self.assertEqual(bak, City.objects.get(name='Pueblo').point)
def test_kmz(self):
"Testing `render_to_kmz` with non-ASCII data. See #11624."
name = "Åland Islands"
places = [{
'name': name,
'description': name,
'kml': '<Point><coordinates>5.0,23.0</coordinates></Point>'
}]
render_to_kmz('gis/kml/placemarks.kml', {'places': places})
@skipUnlessDBFeature("supports_extent_aggr")
def test_extent(self):
"Testing `extent` on a table with a single point. See #11827."
pnt = City.objects.get(name='Pueblo').point
ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
extent = City.objects.filter(name='Pueblo').aggregate(Extent('point'))['point__extent']
for ref_val, val in zip(ref_ext, extent):
self.assertAlmostEqual(ref_val, val, 4)
def test_unicode_date(self):
"Testing dates are converted properly, even on SpatiaLite. See #16408."
founded = datetime(1857, 5, 23)
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
founded=founded)
self.assertEqual(founded, PennsylvaniaCity.objects.datetimes('founded', 'day')[0])
self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])
def test_empty_count(self):
"Testing that PostGISAdapter.__eq__ does check empty strings. See #13670."
# contrived example, but need a geo lookup paired with an id__in lookup
pueblo = City.objects.get(name='Pueblo')
state = State.objects.filter(poly__contains=pueblo.point)
cities_within_state = City.objects.filter(id__in=state)
# .count() should not throw TypeError in __eq__
self.assertEqual(cities_within_state.count(), 1)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test_defer_or_only_with_annotate(self):
"Regression for #16409. Make sure defer() and only() work with annotate()"
self.assertIsInstance(list(City.objects.annotate(Count('point')).defer('name')), list)
self.assertIsInstance(list(City.objects.annotate(Count('point')).only('name')), list)
def test_boolean_conversion(self):
"Testing Boolean value conversion with the spatial backend, see #15169."
t1 = Truth.objects.create(val=True)
t2 = Truth.objects.create(val=False)
val1 = Truth.objects.get(pk=t1.pk).val
val2 = Truth.objects.get(pk=t2.pk).val
# verify types -- shouldn't be 0/1
self.assertIsInstance(val1, bool)
self.assertIsInstance(val2, bool)
# verify values
self.assertEqual(val1, True)
self.assertEqual(val2, False)
| bsd-3-clause |
cmichal/python-social-auth | social/tests/backends/test_saml.py | 49 | 4569 | import re
import json
import sys
import unittest2
import requests
from os import path
from mock import patch
from httpretty import HTTPretty
try:
from onelogin.saml2.utils import OneLogin_Saml2_Utils
except ImportError:
# Only available for python 2.7 at the moment, so don't worry if this fails
pass
from social.tests.backends.base import BaseBackendTest
from social.p3 import urlparse, urlunparse, urlencode, parse_qs
DATA_DIR = path.join(path.dirname(__file__), 'data')
@unittest2.skipUnless(
sys.version_info[:2] == (2, 7),
'python-saml currently depends on 2.7; 3+ support coming soon')
@unittest2.skipIf('__pypy__' in sys.builtin_module_names,
'dm.xmlsec not compatible with pypy')
class SAMLTest(BaseBackendTest):
backend_path = 'social.backends.saml.SAMLAuth'
expected_username = 'myself'
def extra_settings(self):
name = path.join(DATA_DIR, 'saml_config.json')
with open(name, 'r') as config_file:
config_str = config_file.read()
return json.loads(config_str)
def setUp(self):
"""Patch the time so that we can replay canned
request/response pairs"""
super(SAMLTest, self).setUp()
@staticmethod
def fixed_time():
return OneLogin_Saml2_Utils.parse_SAML_to_time(
'2015-05-09T03:57:22Z'
)
now_patch = patch.object(OneLogin_Saml2_Utils, 'now', fixed_time)
now_patch.start()
self.addCleanup(now_patch.stop)
def install_http_intercepts(self, start_url, return_url):
# When we request start_url
# (https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO...)
# we will eventually get a redirect back, with SAML assertion
# data in the query string. A pre-recorded correct response
# is kept in this .txt file:
name = path.join(DATA_DIR, 'saml_response.txt')
with open(name, 'r') as response_file:
response_url = response_file.read()
HTTPretty.register_uri(HTTPretty.GET, start_url, status=301,
location=response_url)
HTTPretty.register_uri(HTTPretty.GET, return_url, status=200,
body='foobar')
def do_start(self):
# pretend we've started with a URL like /login/saml/?idp=testshib:
self.strategy.set_request_data({'idp': 'testshib'}, self.backend)
start_url = self.backend.start().url
# Modify the start URL to make the SAML request consistent
# from test to test:
start_url = self.modify_start_url(start_url)
# If the SAML Identity Provider recognizes the user, we will
# be redirected back to:
return_url = self.backend.redirect_uri
self.install_http_intercepts(start_url, return_url)
response = requests.get(start_url)
self.assertTrue(response.url.startswith(return_url))
self.assertEqual(response.text, 'foobar')
query_values = dict((k, v[0]) for k, v in
parse_qs(urlparse(response.url).query).items())
self.assertNotIn(' ', query_values['SAMLResponse'])
self.strategy.set_request_data(query_values, self.backend)
return self.backend.complete()
def test_metadata_generation(self):
"""Test that we can generate the metadata without error"""
xml, errors = self.backend.generate_metadata_xml()
self.assertEqual(len(errors), 0)
self.assertEqual(xml[0], '<')
def test_login(self):
"""Test that we can authenticate with a SAML IdP (TestShib)"""
self.do_login()
def modify_start_url(self, start_url):
"""
Given a SAML redirect URL, parse it and change the ID to
a consistent value, so the request is always identical.
"""
# Parse the SAML Request URL to get the XML being sent to TestShib
url_parts = urlparse(start_url)
query = dict((k, v[0]) for (k, v) in
parse_qs(url_parts.query).iteritems())
xml = OneLogin_Saml2_Utils.decode_base64_and_inflate(
query['SAMLRequest']
)
# Modify the XML:
xml, changed = re.subn(r'ID="[^"]+"', 'ID="TEST_ID"', xml)
self.assertEqual(changed, 1)
# Update the URL to use the modified query string:
query['SAMLRequest'] = OneLogin_Saml2_Utils.deflate_and_base64_encode(
xml
)
url_parts = list(url_parts)
url_parts[4] = urlencode(query)
return urlunparse(url_parts)
| bsd-3-clause |
thomasyu888/Genie | genie/validate.py | 1 | 10061 | #!/usr/bin/env python3
import importlib
import inspect
import logging
import sys
import synapseclient
try:
from synapseclient.core.exceptions import SynapseHTTPError
except ModuleNotFoundError:
from synapseclient.exceptions import SynapseHTTPError
from . import config
from . import example_filetype_format
from . import process_functions
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ValidationHelper(object):
# Used for the kwargs in validate_single_file
# Overload this per class
_validate_kwargs = []
def __init__(self, syn, project_id, center, entitylist,
format_registry=config.PROCESS_FILES,
file_type=None):
"""A validator helper class for a center's files.
Args:
syn: a synapseclient.Synapse object
project_id: Synapse Project ID where files are stored and configured.
center: The participating center name.
            entitylist: a list of synapseclient.File entities.
format_registry: A dictionary mapping file format name to the
format class.
file_type: Specify file type to skip filename validation
"""
self._synapse_client = syn
self._project = syn.get(project_id)
self.entitylist = entitylist
self.center = center
self._format_registry = format_registry
self.file_type = (self.determine_filetype()
if file_type is None else file_type)
def determine_filetype(self):
"""Gets the file type of the file by validating its filename
Args:
syn: Synapse object
filepathlist: list of filepaths to center files
Returns:
str: File type of input files. None if no filetype found
"""
filetype = None
# Loop through file formats
for file_format in self._format_registry:
validator = self._format_registry[file_format](self._synapse_client, self.center)
try:
filenames = [entity.name for entity in self.entitylist]
filetype = validator.validateFilename(filenames)
except AssertionError:
continue
# If valid filename, return file type.
if filetype is not None:
break
return filetype
def validate_single_file(self, **kwargs):
"""Validate a submitted file unit.
Returns:
message: errors and warnings
valid: Boolean value of validation status
"""
if self.file_type not in self._format_registry:
valid = False
errors = "Your filename is incorrect! Please change your filename before you run the validator or specify --filetype if you are running the validator locally"
warnings = ""
else:
mykwargs = {}
for required_parameter in self._validate_kwargs:
assert required_parameter in kwargs.keys(), \
"%s not in parameter list" % required_parameter
mykwargs[required_parameter] = kwargs[required_parameter]
validator_cls = self._format_registry[self.file_type]
validator = validator_cls(self._synapse_client, self.center)
filepathlist = [entity.path for entity in self.entitylist]
valid, errors, warnings = validator.validate(filePathList=filepathlist,
**mykwargs)
# Complete error message
message = collect_errors_and_warnings(errors, warnings)
return (valid, message)
class GenieValidationHelper(ValidationHelper):
"""A validator helper class for AACR Project Genie.
"""
_validate_kwargs = ['oncotree_link', 'nosymbol_check']
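# Hedged usage sketch (ids, center name, and entities are illustrative): a
# helper is built per submission and then asked to validate:
#
#     helper = GenieValidationHelper(syn, 'syn99999', 'SAGE', entities)
#     valid, message = helper.validate_single_file(
#         oncotree_link=oncotree_url, nosymbol_check=False)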
def collect_errors_and_warnings(errors, warnings):
'''Aggregates error and warnings into a string.
Args:
errors: string of file errors, separated by new lines.
warnings: string of file warnings, separated by new lines.
Returns:
message - errors + warnings
'''
# Complete error message
message = "----------------ERRORS----------------\n"
if errors == "":
message = "YOUR FILE IS VALIDATED!\n"
logger.info(message)
else:
for error in errors.split("\n"):
if error != '':
logger.error(error)
message += errors
if warnings != "":
for warning in warnings.split("\n"):
if warning != '':
logger.warning(warning)
message += "-------------WARNINGS-------------\n" + warnings
return message
def get_config(syn, synid):
"""Gets Synapse database to Table mapping in dict
Args:
syn: Synapse connection
synid: Synapse id of database mapping table
Returns:
dict: {'databasename': 'synid'}
"""
config = syn.tableQuery('SELECT * FROM {}'.format(synid))
configdf = config.asDataFrame()
configdf.index = configdf['Database']
config_dict = configdf.to_dict()
return config_dict['Id']
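# Hedged illustration (Synapse ids are synthetic): the mapping returned by
# get_config looks like
#
#     {'centerMapping': 'syn12345', 'oncotreeLink': 'syn67890', ...}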
def _check_parentid_permission_container(syn, parentid):
"""Checks permission / container
# TODO: Currently only checks if a user has READ permissions
"""
if parentid is not None:
try:
syn_ent = syn.get(parentid, downloadFile=False)
# If not container, throw an assertion
assert synapseclient.entity.is_container(syn_ent)
except (SynapseHTTPError, AssertionError):
raise ValueError(
"Provided Synapse id must be your input folder Synapse id "
"or a Synapse Id of a folder inside your input directory")
def _check_center_input(center, center_list):
"""Checks center input
Args:
center: Center name
center_list: List of allowed centers
Raises:
ValueError: If specify a center not part of the center list
"""
if center not in center_list:
raise ValueError("Must specify one of these "
f"centers: {', '.join(center_list)}")
def _get_oncotreelink(syn, databasetosynid_mappingdf, oncotree_link=None):
"""
Gets oncotree link unless a link is specified by the user
Args:
syn: Synapse object
databasetosynid_mappingdf: database to synid mapping
oncotree_link: link to oncotree. Default is None
Returns:
oncotree link
"""
if oncotree_link is None:
oncolink = databasetosynid_mappingdf.query(
'Database == "oncotreeLink"').Id
oncolink_ent = syn.get(oncolink.iloc[0])
oncotree_link = oncolink_ent.externalURL
return oncotree_link
def _upload_to_synapse(syn, filepaths, valid, parentid=None):
"""
Upload to synapse if parentid is specified and valid
Args:
syn: Synapse object
filepaths: List of file paths
valid: Boolean value for validity of file
parentid: Synapse id of container. Default is None
"""
if parentid is not None and valid:
logger.info("Uploading file to {}".format(parentid))
for path in filepaths:
file_ent = synapseclient.File(path, parent=parentid)
ent = syn.store(file_ent)
logger.info("Stored to {}".format(ent.id))
def collect_format_types(package_names):
"""Finds subclasses of the example_filetype_format.FileTypeFormat
from a list of package names.
Args:
package_names: A list of Python package names as strings.
Returns:
A list of classes that are in the named packages and subclasses
of example_filetype_format.FileTypeFormat
"""
file_format_list = []
for package_name in package_names:
importlib.import_module(package_name)
for cls in config.get_subclasses(example_filetype_format.FileTypeFormat):
logger.debug("checking {}.".format(cls))
cls_module_name = cls.__module__
cls_pkg = cls_module_name.split('.')[0]
if cls_pkg in package_names:
file_format_list.append(cls)
file_format_dict = config.make_format_registry_dict(file_format_list)
return file_format_dict
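# Hedged usage sketch (the package name is hypothetical): discovering every
# FileTypeFormat subclass shipped in one or more registry packages:
#
#     registry = collect_format_types(['my_genie_registry'])
#     # -> {'maf': <class 'my_genie_registry.maf.maf'>, ...}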
def _perform_validate(syn, args):
"""This is the main entry point to the genie command line tool."""
# Check parentid argparse
_check_parentid_permission_container(syn, args.parentid)
databasetosynid_mappingdf = process_functions.get_synid_database_mappingdf(
syn, project_id=args.project_id)
synid = databasetosynid_mappingdf.query('Database == "centerMapping"').Id
center_mapping = syn.tableQuery('select * from {}'.format(synid.iloc[0]))
center_mapping_df = center_mapping.asDataFrame()
# Check center argparse
_check_center_input(args.center, center_mapping_df.center.tolist())
args.oncotree_link = _get_oncotreelink(syn, databasetosynid_mappingdf,
oncotree_link=args.oncotree_link)
format_registry = collect_format_types(args.format_registry_packages)
logger.debug("Using {} file formats.".format(format_registry))
entity_list = [synapseclient.File(name=filepath, path=filepath,
parentId=None)
for filepath in args.filepath]
validator = GenieValidationHelper(syn=syn, project_id=args.project_id,
center=args.center,
entitylist=entity_list,
format_registry=format_registry,
file_type=args.filetype)
mykwargs = dict(oncotree_link=args.oncotree_link,
nosymbol_check=args.nosymbol_check,
project_id=args.project_id)
valid, message = validator.validate_single_file(**mykwargs)
# Upload to synapse if parentid is specified and valid
_upload_to_synapse(syn, args.filepath, valid, parentid=args.parentid)
| mit |
mrquim/mrquimrepo | script.module.unidecode/lib/unidecode/x061.py | 252 | 4662 | data = (
'Qiao ', # 0x00
'Chou ', # 0x01
'Bei ', # 0x02
'Xuan ', # 0x03
'Wei ', # 0x04
'Ge ', # 0x05
'Qian ', # 0x06
'Wei ', # 0x07
'Yu ', # 0x08
'Yu ', # 0x09
'Bi ', # 0x0a
'Xuan ', # 0x0b
'Huan ', # 0x0c
'Min ', # 0x0d
'Bi ', # 0x0e
'Yi ', # 0x0f
'Mian ', # 0x10
'Yong ', # 0x11
'Kai ', # 0x12
'Dang ', # 0x13
'Yin ', # 0x14
'E ', # 0x15
'Chen ', # 0x16
'Mou ', # 0x17
'Ke ', # 0x18
'Ke ', # 0x19
'Yu ', # 0x1a
'Ai ', # 0x1b
'Qie ', # 0x1c
'Yan ', # 0x1d
'Nuo ', # 0x1e
'Gan ', # 0x1f
'Yun ', # 0x20
'Zong ', # 0x21
'Sai ', # 0x22
'Leng ', # 0x23
'Fen ', # 0x24
'[?] ', # 0x25
'Kui ', # 0x26
'Kui ', # 0x27
'Que ', # 0x28
'Gong ', # 0x29
'Yun ', # 0x2a
'Su ', # 0x2b
'Su ', # 0x2c
'Qi ', # 0x2d
'Yao ', # 0x2e
'Song ', # 0x2f
'Huang ', # 0x30
'Ji ', # 0x31
'Gu ', # 0x32
'Ju ', # 0x33
'Chuang ', # 0x34
'Ni ', # 0x35
'Xie ', # 0x36
'Kai ', # 0x37
'Zheng ', # 0x38
'Yong ', # 0x39
'Cao ', # 0x3a
'Sun ', # 0x3b
'Shen ', # 0x3c
'Bo ', # 0x3d
'Kai ', # 0x3e
'Yuan ', # 0x3f
'Xie ', # 0x40
'Hun ', # 0x41
'Yong ', # 0x42
'Yang ', # 0x43
'Li ', # 0x44
'Sao ', # 0x45
'Tao ', # 0x46
'Yin ', # 0x47
'Ci ', # 0x48
'Xu ', # 0x49
'Qian ', # 0x4a
'Tai ', # 0x4b
'Huang ', # 0x4c
'Yun ', # 0x4d
'Shen ', # 0x4e
'Ming ', # 0x4f
'[?] ', # 0x50
'She ', # 0x51
'Cong ', # 0x52
'Piao ', # 0x53
'Mo ', # 0x54
'Mu ', # 0x55
'Guo ', # 0x56
'Chi ', # 0x57
'Can ', # 0x58
'Can ', # 0x59
'Can ', # 0x5a
'Cui ', # 0x5b
'Min ', # 0x5c
'Te ', # 0x5d
'Zhang ', # 0x5e
'Tong ', # 0x5f
'Ao ', # 0x60
'Shuang ', # 0x61
'Man ', # 0x62
'Guan ', # 0x63
'Que ', # 0x64
'Zao ', # 0x65
'Jiu ', # 0x66
'Hui ', # 0x67
'Kai ', # 0x68
'Lian ', # 0x69
'Ou ', # 0x6a
'Song ', # 0x6b
'Jin ', # 0x6c
'Yin ', # 0x6d
'Lu ', # 0x6e
'Shang ', # 0x6f
'Wei ', # 0x70
'Tuan ', # 0x71
'Man ', # 0x72
'Qian ', # 0x73
'She ', # 0x74
'Yong ', # 0x75
'Qing ', # 0x76
'Kang ', # 0x77
'Di ', # 0x78
'Zhi ', # 0x79
'Lou ', # 0x7a
'Juan ', # 0x7b
'Qi ', # 0x7c
'Qi ', # 0x7d
'Yu ', # 0x7e
'Ping ', # 0x7f
'Liao ', # 0x80
'Cong ', # 0x81
'You ', # 0x82
'Chong ', # 0x83
'Zhi ', # 0x84
'Tong ', # 0x85
'Cheng ', # 0x86
'Qi ', # 0x87
'Qu ', # 0x88
'Peng ', # 0x89
'Bei ', # 0x8a
'Bie ', # 0x8b
'Chun ', # 0x8c
'Jiao ', # 0x8d
'Zeng ', # 0x8e
'Chi ', # 0x8f
'Lian ', # 0x90
'Ping ', # 0x91
'Kui ', # 0x92
'Hui ', # 0x93
'Qiao ', # 0x94
'Cheng ', # 0x95
'Yin ', # 0x96
'Yin ', # 0x97
'Xi ', # 0x98
'Xi ', # 0x99
'Dan ', # 0x9a
'Tan ', # 0x9b
'Duo ', # 0x9c
'Dui ', # 0x9d
'Dui ', # 0x9e
'Su ', # 0x9f
'Jue ', # 0xa0
'Ce ', # 0xa1
'Xiao ', # 0xa2
'Fan ', # 0xa3
'Fen ', # 0xa4
'Lao ', # 0xa5
'Lao ', # 0xa6
'Chong ', # 0xa7
'Han ', # 0xa8
'Qi ', # 0xa9
'Xian ', # 0xaa
'Min ', # 0xab
'Jing ', # 0xac
'Liao ', # 0xad
'Wu ', # 0xae
'Can ', # 0xaf
'Jue ', # 0xb0
'Cu ', # 0xb1
'Xian ', # 0xb2
'Tan ', # 0xb3
'Sheng ', # 0xb4
'Pi ', # 0xb5
'Yi ', # 0xb6
'Chu ', # 0xb7
'Xian ', # 0xb8
'Nao ', # 0xb9
'Dan ', # 0xba
'Tan ', # 0xbb
'Jing ', # 0xbc
'Song ', # 0xbd
'Han ', # 0xbe
'Jiao ', # 0xbf
'Wai ', # 0xc0
'Huan ', # 0xc1
'Dong ', # 0xc2
'Qin ', # 0xc3
'Qin ', # 0xc4
'Qu ', # 0xc5
'Cao ', # 0xc6
'Ken ', # 0xc7
'Xie ', # 0xc8
'Ying ', # 0xc9
'Ao ', # 0xca
'Mao ', # 0xcb
'Yi ', # 0xcc
'Lin ', # 0xcd
'Se ', # 0xce
'Jun ', # 0xcf
'Huai ', # 0xd0
'Men ', # 0xd1
'Lan ', # 0xd2
'Ai ', # 0xd3
'Lin ', # 0xd4
'Yan ', # 0xd5
'Gua ', # 0xd6
'Xia ', # 0xd7
'Chi ', # 0xd8
'Yu ', # 0xd9
'Yin ', # 0xda
'Dai ', # 0xdb
'Meng ', # 0xdc
'Ai ', # 0xdd
'Meng ', # 0xde
'Dui ', # 0xdf
'Qi ', # 0xe0
'Mo ', # 0xe1
'Lan ', # 0xe2
'Men ', # 0xe3
'Chou ', # 0xe4
'Zhi ', # 0xe5
'Nuo ', # 0xe6
'Nuo ', # 0xe7
'Yan ', # 0xe8
'Yang ', # 0xe9
'Bo ', # 0xea
'Zhi ', # 0xeb
'Kuang ', # 0xec
'Kuang ', # 0xed
'You ', # 0xee
'Fu ', # 0xef
'Liu ', # 0xf0
'Mie ', # 0xf1
'Cheng ', # 0xf2
'[?] ', # 0xf3
'Chan ', # 0xf4
'Meng ', # 0xf5
'Lan ', # 0xf6
'Huai ', # 0xf7
'Xuan ', # 0xf8
'Rang ', # 0xf9
'Chan ', # 0xfa
'Ji ', # 0xfb
'Ju ', # 0xfc
'Huan ', # 0xfd
'She ', # 0xfe
'Yi ', # 0xff
)
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_tkagg.py | 10 | 40267 | # Todd Miller jmiller@stsci.edu
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
from six.moves import tkinter_filedialog as FileDialog
import os, sys, math
import os.path
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
from six.moves import tkinter_messagebox as tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class Show(ShowBase):
def mainloop(self):
Tk.mainloop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
_focus = windowing.FocusManager()
window = Tk.Tk()
window.withdraw()
if Tk.TkVersion >= 8.5:
# put a mpl icon on the window rather than the default tk icon. Tkinter
        # doesn't allow colour icons on linux systems, but tk >=8.5 has an iconphoto
# command which we call directly. Source:
# http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
icon_fname = os.path.join(rcParams['datapath'], 'images', 'matplotlib.ppm')
icon_img = Tk.PhotoImage(file=icon_fname)
try:
window.tk.call('wm', 'iconphoto', window._w, icon_img)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# log the failure, but carry on
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
canvas.draw_idle()
return figManager
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode.
if not self._single and len(self.callbacks) > 0:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65515 : 'super',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
_keycode_lookup = {
262145: 'control',
524320: 'alt',
524352: 'alt',
1048584: 'super',
1048592: 'super',
131074: 'shift',
131076: 'shift',
}
"""_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
keys on apple keyboards."""
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
self._idle_callback = None
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=0,
highlightthickness=0)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
self._tkcanvas.bind(name, self.button_dblclick_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows, "+")
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self.close_event()
root.bind("<Destroy>", filter_destroy, "+")
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
self.resize_event()
self.show()
# a resizing will in general move the pointer position
# relative to the canvas, so process it as a motion notify
# event. An intended side effect of this call is to allow
# window raises (which trigger a resize) to get the cursor
# position to the mpl event framework so key presses which are
# over the axes will work w/o clicks or explicit motion
self._update_pointer_position(event)
def _update_pointer_position(self, guiEvent=None):
"""
Figure out if we are inside the canvas or not and update the
canvas enter/leave events
"""
        # if the pointer is over the canvas, set the lastx and lasty
# attrs of the canvas so it can process event w/o mouse click
# or move
# the window's upper, left coords in screen coords
xw = self._tkcanvas.winfo_rootx()
yw = self._tkcanvas.winfo_rooty()
# the pointer's location in screen coords
xp, yp = self._tkcanvas.winfo_pointerxy()
        # now figure out the canvas coordinates of the pointer
xc = xp - xw
yc = yp - yw
# flip top/bottom
yc = self.figure.bbox.height - yc
# JDH: this method was written originally to get the pointer
# location to the backend lastx and lasty attrs so that events
# like KeyEvent can be handled without mouse events. e.g., if
# the cursor is already above the axes, then key presses like
# 'g' should toggle the grid. In order for this to work in
# backend_bases, the canvas needs to know _lastx and _lasty.
# There are three ways to get this info the canvas:
#
        # 1) set it explicitly
#
        # 2) call enter/leave events explicitly. The downside of this
        # in the impl below is that enter could be repeatedly
        # triggered if the mouse is over the axes and one is
# resizing with the keyboard. This is not entirely bad,
# because the mouse position relative to the canvas is
# changing, but it may be surprising to get repeated entries
# without leaves
#
# 3) process it as a motion notify event. This also has pros
# and cons. The mouse is moving relative to the window, but
        # this may surprise an event handler writer who is getting
# motion_notify_events even if the mouse has not moved
# here are the three scenarios
if 1:
# just manually set it
self._lastx, self._lasty = xc, yc
elif 0:
# alternate implementation: process it as a motion
FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
elif 0:
# alternate implementation -- process enter/leave events
# instead of motion/notify
if self.figure.bbox.contains(xc, yc):
self.enter_notify_event(guiEvent, xy=(xc,yc))
else:
self.leave_notify_event(guiEvent)
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
if self._idle is False:
return
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event, dblclick=False):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
def button_dblclick_event(self,event):
self.button_press_event(event,dblclick=True)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = +1
elif num==5: step = -1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val == 0 and sys.platform == 'darwin' and \
event.keycode in self._keycode_lookup:
key = self._keycode_lookup[event.keycode]
elif val < 256:
key = chr(val)
else:
key = None
# add modifier keys to the key string. Bit details originate from
# http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
# BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
# BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
# BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
# In general, the modifier key is excluded from the modifier flag,
# however this is not the case on "darwin", so double check that
# we aren't adding repeat modifier flags to a modifier key.
if sys.platform == 'win32':
modifiers = [(17, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
elif sys.platform == 'darwin':
modifiers = [(3, 'super', 'super'),
(4, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
else:
modifiers = [(6, 'super', 'super'),
(3, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
if key is not None:
# note, shift is not added to the keys as this is already accounted for
for bitmask, prefix, key_name in modifiers:
if event.state & (1 << bitmask) and key_name not in key:
key = '{0}+{1}'.format(prefix, key)
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerTk(self._tkcanvas, *args, **kwargs)
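    # Illustrative usage sketch (not part of the original source; `canvas` is
    # a hypothetical live FigureCanvasTkAgg instance and `redraw_func` a
    # hypothetical callable):
    #   timer = canvas.new_timer(interval=500)
    #   timer.add_callback(redraw_func)  # runs every 500 ms in Tk's event loop
    #   timer.start()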
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.set_window_title("Figure %d" % num)
self.canvas = canvas
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._num = num
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarTk(self.window, self.toolmanager)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def _get_toolbar(self):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2TkAgg(self.canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarTk(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas)
else:
toolmanager = None
return toolmanager
def resize(self, width, height=None):
        # before 09-12-22, the resize method took a single *event*
        # parameter. On the other hand, the resize methods of other
        # FigureManager classes take *width* and *height* parameters,
        # which are used to change the size of the window. For
        # Figure.set_size_inches with forward=True to work with the Tk
        # backend, I changed the function signature but tried to keep
        # it backward compatible. -JJL
        # when a single parameter is given, treat it as an event
if height is None:
width = width.width
else:
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
if self.toolbar is not None:
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
_focus = windowing.FocusManager()
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
# anim.py requires this
self.window.update()
else:
self.canvas.draw_idle()
# Raise the new window.
self.canvas.manager.window.attributes('-topmost', 1)
self.canvas.manager.window.attributes('-topmost', 0)
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers()==0:
if self.window is not None:
self.window.quit()
self.window = None
def get_window_title(self):
return self.window.wm_title()
def set_window_title(self, title):
self.window.wm_title(title)
def full_screen_toggle(self):
is_fullscreen = bool(self.window.attributes('-fullscreen'))
self.window.attributes('-fullscreen', not is_fullscreen)
class AxisMenu(object):
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
Public attributes
    canvas - the FigureCanvas (FigureCanvasTkAgg)
    window - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command, extension='.gif'):
img_file = os.path.join(rcParams['datapath'], 'images', file + extension)
im = Tk.PhotoImage(master=self, file=img_file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# spacer, unhandled in Tk
pass
else:
button = self._Button(text=text, file=image_file,
command=getattr(self, callback))
if tooltip_text is not None:
ToolTip.createToolTip(button, tooltip_text)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = rcParams.get('savefig.directory', '')
initialdir = os.path.expanduser(initialdir)
initialfile = self.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
class ToolTip(object):
"""
Tooltip recipe from
http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
"""
@staticmethod
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, _ = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + self.widget.winfo_rooty()
self.tipwindow = tw = Tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except Tk.TclError:
pass
label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,
background="#ffffe0", relief=Tk.SOLID, borderwidth=1,
)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
class RubberbandTk(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
def draw_rubberband(self, x0, y0, x1, y1):
height = self.figure.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
try:
self.lastrect
except AttributeError:
pass
else:
self.figure.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
def remove_rubberband(self):
try:
self.lastrect
except AttributeError:
pass
else:
self.figure.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
class SetCursorTk(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.manager.window.configure(cursor=cursord[cursor])
class ToolbarTk(ToolContainerBase, Tk.Frame):
def __init__(self, toolmanager, window):
ToolContainerBase.__init__(self, toolmanager)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._toolitems = {}
self.pack(side=Tk.TOP, fill=Tk.X)
self._groups = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
frame = self._get_groupframe(group)
button = self._Button(name, image_file, toggle, frame)
if description is not None:
ToolTip.createToolTip(button, description)
self._toolitems.setdefault(name, [])
self._toolitems[name].append(button)
def _get_groupframe(self, group):
if group not in self._groups:
if self._groups:
self._add_separator()
frame = Tk.Frame(master=self, borderwidth=0)
frame.pack(side=Tk.LEFT, fill=Tk.Y)
self._groups[group] = frame
return self._groups[group]
def _add_separator(self):
separator = Tk.Frame(master=self, bd=5, width=1, bg='black')
separator.pack(side=Tk.LEFT, fill=Tk.Y, padx=2)
def _Button(self, text, image_file, toggle, frame):
if image_file is not None:
im = Tk.PhotoImage(master=self, file=image_file)
else:
im = None
if not toggle:
b = Tk.Button(master=frame, text=text, padx=2, pady=2, image=im,
command=lambda: self._button_click(text))
else:
b = Tk.Checkbutton(master=frame, text=text, padx=2, pady=2,
image=im, indicatoron=False,
command=lambda: self._button_click(text))
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _button_click(self, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem in self._toolitems[name]:
if toggled:
toolitem.select()
else:
toolitem.deselect()
def remove_toolitem(self, name):
for toolitem in self._toolitems[name]:
toolitem.pack_forget()
del self._toolitems[name]
class StatusbarTk(StatusbarBase, Tk.Frame):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self._message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.TOP, fill=Tk.X)
def set_message(self, s):
self._message.set(s)
class SaveFigureTk(backend_tools.SaveFigureBase):
def trigger(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.figure.canvas.get_supported_filetypes().copy()
default_filetype = self.figure.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
# defaultextension = self.figure.canvas.get_default_filetype()
defaultextension = ''
initialdir = rcParams.get('savefig.directory', '')
initialdir = os.path.expanduser(initialdir)
initialfile = self.figure.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.figure.canvas.manager.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.figure.canvas.print_figure(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
class ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def trigger(self, *args):
self.init_window()
self.window.lift()
def init_window(self):
if self.window:
return
toolfig = Figure(figsize=(6, 3))
self.window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=self.window)
toolfig.subplots_adjust(top=0.9)
_tool = SubplotTool(self.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.window.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self, *args, **kwargs):
self.window.destroy()
self.window = None
backend_tools.ToolSaveFigure = SaveFigureTk
backend_tools.ToolConfigureSubplots = ConfigureSubplotsTk
backend_tools.ToolSetCursor = SetCursorTk
backend_tools.ToolRubberband = RubberbandTk
Toolbar = ToolbarTk
FigureCanvas = FigureCanvasTkAgg
FigureManager = FigureManagerTkAgg
| gpl-3.0 |
Allow2CEO/browser-ios | brave/node_modules/bloom-filter-cpp/vendor/depot_tools/third_party/protobuf26/internal/message_listener.py | 590 | 3354 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines a listener interface for observing certain
state transitions on Message objects.
Also defines a null implementation of this interface.
"""
__author__ = 'robinson@google.com (Will Robinson)'
class MessageListener(object):
"""Listens for modifications made to a message. Meant to be registered via
Message._SetListener().
Attributes:
dirty: If True, then calling Modified() would be a no-op. This can be
used to avoid these calls entirely in the common case.
"""
def Modified(self):
"""Called every time the message is modified in such a way that the parent
message may need to be updated. This currently means either:
(a) The message was modified for the first time, so the parent message
should henceforth mark the message as present.
(b) The message's cached byte size became dirty -- i.e. the message was
modified for the first time after a previous call to ByteSize().
Therefore the parent should also mark its byte size as dirty.
Note that (a) implies (b), since new objects start out with a client cached
size (zero). However, we document (a) explicitly because it is important.
Modified() will *only* be called in response to one of these two events --
not every time the sub-message is modified.
Note that if the listener's |dirty| attribute is true, then calling
Modified at the moment would be a no-op, so it can be skipped. Performance-
sensitive callers should check this attribute directly before calling since
it will be true most of the time.
"""
raise NotImplementedError
class NullMessageListener(object):
"""No-op MessageListener implementation."""
def Modified(self):
pass
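# Illustrative sketch only (not part of the original module): a listener that
# honors the |dirty| contract documented above. The parent object and its
# Modified() method are hypothetical stand-ins.
class _ExampleDirtyListener(object):
    """Propagates the first modification to a parent, then goes quiet."""
    def __init__(self, parent):
        self._parent = parent
        self.dirty = False  # while True, callers may skip calling Modified()
    def Modified(self):
        if self.dirty:
            return  # already propagated; the contract allows skipping
        self.dirty = True
        self._parent.Modified()  # mark parent present / dirty its byte size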
| mpl-2.0 |
Manojkumar91/odoo_inresto | addons/website_sale_delivery/controllers/main.py | 214 | 1205 | # -*- coding: utf-8 -*-
import openerp
from openerp import http
from openerp.http import request
import openerp.addons.website_sale.controllers.main
class website_sale(openerp.addons.website_sale.controllers.main.website_sale):
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
cr, uid, context = request.cr, request.uid, request.context
order = request.website.sale_get_order(context=context)
carrier_id = post.get('carrier_id')
if carrier_id:
carrier_id = int(carrier_id)
if order:
request.registry['sale.order']._check_carrier_quotation(cr, uid, order, force_carrier_id=carrier_id, context=context)
if carrier_id:
return request.redirect("/shop/payment")
res = super(website_sale, self).payment(**post)
return res
    def order_lines_2_google_api(self, order_lines):
        """ Transforms a list of order lines into a dict for Google Analytics, excluding delivery lines """
order_lines_not_delivery = [line for line in order_lines if not line.is_delivery]
return super(website_sale, self).order_lines_2_google_api(order_lines_not_delivery)
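    # Illustrative note (not from the original module): for an order holding
    # [product_line, delivery_line], the override above forwards only
    # [product_line] to the parent implementation, keeping delivery charges
    # out of the Google Analytics payload.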
| agpl-3.0 |
lalanza808/lalanza808.github.io | vendor/bundle/ruby/2.0.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/lexers/_stan_builtins.py | 47 | 8721 | # -*- coding: utf-8 -*-
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
``pygments.lexers.math.StanLexer``. These builtins are from Stan language v2.2.0.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
KEYWORDS = ['else', 'for', 'if', 'in', 'lp__', 'print', 'while']
TYPES = [ 'cholesky_factor_cov',
'corr_matrix',
'cov_matrix',
'int',
'matrix',
'ordered',
'positive_ordered',
'real',
'row_vector',
'simplex',
'unit_vector',
'vector']
FUNCTIONS = [ 'Phi',
'Phi_approx',
'abs',
'acos',
'acosh',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_ccdf_log',
'bernoulli_cdf',
'bernoulli_cdf_log',
'bernoulli_log',
'bernoulli_logit_log',
'bernoulli_rng',
'bessel_first_kind',
'bessel_second_kind',
'beta_binomial_ccdf_log',
'beta_binomial_cdf',
'beta_binomial_cdf_log',
'beta_binomial_log',
'beta_binomial_rng',
'beta_ccdf_log',
'beta_cdf',
'beta_cdf_log',
'beta_log',
'beta_rng',
'binary_log_loss',
'binomial_ccdf_log',
'binomial_cdf',
'binomial_cdf_log',
'binomial_coefficient_log',
'binomial_log',
'binomial_logit_log',
'binomial_rng',
'block',
'categorical_log',
'categorical_logit_log',
'categorical_rng',
'cauchy_ccdf_log',
'cauchy_cdf',
'cauchy_cdf_log',
'cauchy_log',
'cauchy_rng',
'cbrt',
'ceil',
'chi_square_ccdf_log',
'chi_square_cdf',
'chi_square_cdf_log',
'chi_square_log',
'chi_square_rng',
'cholesky_decompose',
'col',
'cols',
'columns_dot_product',
'columns_dot_self',
'cos',
'cosh',
'crossprod',
'cumulative_sum',
'determinant',
'diag_matrix',
'diag_post_multiply',
'diag_pre_multiply',
'diagonal',
'digamma',
'dims',
'dirichlet_log',
'dirichlet_rng',
'distance',
'dot_product',
'dot_self',
'double_exponential_ccdf_log',
'double_exponential_cdf',
'double_exponential_cdf_log',
'double_exponential_log',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
'erf',
'erfc',
'exp',
'exp2',
'exp_mod_normal_ccdf_log',
'exp_mod_normal_cdf',
'exp_mod_normal_cdf_log',
'exp_mod_normal_log',
'exp_mod_normal_rng',
'expm1',
'exponential_ccdf_log',
'exponential_cdf',
'exponential_cdf_log',
'exponential_log',
'exponential_rng',
'fabs',
'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'gamma_ccdf_log',
'gamma_cdf',
'gamma_cdf_log',
'gamma_log',
'gamma_p',
'gamma_q',
'gamma_rng',
'gaussian_dlm_obs_log',
'gumbel_ccdf_log',
'gumbel_cdf',
'gumbel_cdf_log',
'gumbel_log',
'gumbel_rng',
'head',
'hypergeometric_log',
'hypergeometric_rng',
'hypot',
'if_else',
'increment_log_prob',
'int_step',
'inv',
'inv_chi_square_ccdf_log',
'inv_chi_square_cdf',
'inv_chi_square_cdf_log',
'inv_chi_square_log',
'inv_chi_square_rng',
'inv_cloglog',
'inv_gamma_ccdf_log',
'inv_gamma_cdf',
'inv_gamma_cdf_log',
'inv_gamma_log',
'inv_gamma_rng',
'inv_logit',
'inv_sqrt',
'inv_square',
'inv_wishart_log',
'inv_wishart_rng',
'inverse',
'inverse_spd',
'lbeta',
'lgamma',
'lkj_corr_log',
'lkj_corr_rng',
'lmgamma',
'log',
'log10',
'log1m',
'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
'log_diff_exp',
'log_falling_factorial',
'log_inv_logit',
'log_rising_factorial',
'log_softmax',
'log_sum_exp',
'logistic_ccdf_log',
'logistic_cdf',
'logistic_cdf_log',
'logistic_log',
'logistic_rng',
'logit',
'lognormal_ccdf_log',
'lognormal_cdf',
'lognormal_cdf_log',
'lognormal_log',
'lognormal_rng',
'machine_precision',
'max',
'mdivide_left_tri_low',
'mdivide_right_tri_low',
'mean',
'min',
'modified_bessel_first_kind',
'modified_bessel_second_kind',
'multi_normal_cholesky_log',
'multi_normal_log',
'multi_normal_prec_log',
'multi_normal_rng',
'multi_student_t_log',
'multi_student_t_rng',
'multinomial_log',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_ccdf_log',
'neg_binomial_cdf',
'neg_binomial_cdf_log',
'neg_binomial_log',
'neg_binomial_rng',
'negative_infinity',
'normal_ccdf_log',
'normal_cdf',
'normal_cdf_log',
'normal_log',
'normal_rng',
'not_a_number',
'ordered_logistic_log',
'ordered_logistic_rng',
'owens_t',
'pareto_ccdf_log',
'pareto_cdf',
'pareto_cdf_log',
'pareto_log',
'pareto_rng',
'pi',
'poisson_ccdf_log',
'poisson_cdf',
'poisson_cdf_log',
'poisson_log',
'poisson_log_log',
'poisson_rng',
'positive_infinity',
'pow',
'prod',
'quad_form',
'rank',
'rayleigh_ccdf_log',
'rayleigh_cdf',
'rayleigh_cdf_log',
'rayleigh_log',
'rayleigh_rng',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
'rising_factorial',
'round',
'row',
'rows',
'rows_dot_product',
'rows_dot_self',
'scaled_inv_chi_square_ccdf_log',
'scaled_inv_chi_square_cdf',
'scaled_inv_chi_square_cdf_log',
'scaled_inv_chi_square_log',
'scaled_inv_chi_square_rng',
'sd',
'segment',
'sin',
'singular_values',
'sinh',
'size',
'skew_normal_ccdf_log',
'skew_normal_cdf',
'skew_normal_cdf_log',
'skew_normal_log',
'skew_normal_rng',
'softmax',
'sort_asc',
'sort_desc',
'sqrt',
'sqrt2',
'square',
'squared_distance',
'step',
'student_t_ccdf_log',
'student_t_cdf',
'student_t_cdf_log',
'student_t_log',
'student_t_rng',
'sub_col',
'sub_row',
'sum',
'tail',
'tan',
'tanh',
'tcrossprod',
'tgamma',
'to_vector',
'trace',
'trace_gen_quad_form',
'trace_quad_form',
'trigamma',
'trunc',
'uniform_ccdf_log',
'uniform_cdf',
'uniform_cdf_log',
'uniform_log',
'uniform_rng',
'variance',
'von_mises_log',
'weibull_ccdf_log',
'weibull_cdf',
'weibull_cdf_log',
'weibull_log',
'weibull_rng',
'wishart_log',
'wishart_rng']
DISTRIBUTIONS = [ 'bernoulli',
'bernoulli_logit',
'beta',
'beta_binomial',
'binomial',
'binomial_logit',
'categorical',
'categorical_logit',
'cauchy',
'chi_square',
'dirichlet',
'double_exponential',
'exp_mod_normal',
'exponential',
'gamma',
'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'logistic',
'lognormal',
'multi_normal',
'multi_normal_cholesky',
'multi_normal_prec',
'multi_student_t',
'multinomial',
'neg_binomial',
'normal',
'ordered_logistic',
'pareto',
'poisson',
'poisson_log',
'rayleigh',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
'von_mises',
'weibull',
'wishart']
RESERVED = [ 'alignas',
'alignof',
'and',
'and_eq',
'asm',
'auto',
'bitand',
'bitor',
'bool',
'break',
'case',
'catch',
'char',
'char16_t',
'char32_t',
'class',
'compl',
'const',
'const_cast',
'constexpr',
'continue',
'decltype',
'default',
'delete',
'do',
'double',
'dynamic_cast',
'enum',
'explicit',
'export',
'extern',
'false',
'false',
'float',
'friend',
'goto',
'inline',
'int',
'long',
'mutable',
'namespace',
'new',
'noexcept',
'not',
'not_eq',
'nullptr',
'operator',
'or',
'or_eq',
'private',
'protected',
'public',
'register',
'reinterpret_cast',
'repeat',
'return',
'short',
'signed',
'sizeof',
'static',
'static_assert',
'static_cast',
'struct',
'switch',
'template',
'then',
'this',
'thread_local',
'throw',
'true',
'true',
'try',
'typedef',
'typeid',
'typename',
'union',
'unsigned',
'until',
'using',
'virtual',
'void',
'volatile',
'wchar_t',
'xor',
'xor_eq']
| mit |
ampax/edx-platform | lms/djangoapps/course_api/tests/test_forms.py | 33 | 4212 | """
Tests for Course API forms.
"""
import ddt
from django.contrib.auth.models import AnonymousUser
from django.http import QueryDict
from itertools import product
from urllib import urlencode
from openedx.core.djangoapps.util.test_forms import FormTestMixin
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import CourseDetailGetForm, CourseListGetForm
class UsernameTestMixin(object):
"""
Tests the username Form field.
"""
def test_no_user_param_anonymous_access(self):
self.set_up_data(AnonymousUser())
self.form_data.pop('username')
self.assert_valid(self.cleaned_data)
def test_no_user_param(self):
self.set_up_data(AnonymousUser())
self.form_data.pop('username')
self.assert_valid(self.cleaned_data)
@ddt.ddt
class TestCourseListGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase):
"""
Tests for CourseListGetForm
"""
FORM_CLASS = CourseListGetForm
@classmethod
def setUpClass(cls):
super(TestCourseListGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseListGetForm, self).setUp()
self.student = UserFactory.create()
self.set_up_data(self.student)
def set_up_data(self, user):
"""
Sets up the initial form data and the expected clean data.
"""
self.initial = {'requesting_user': user}
self.form_data = QueryDict(
urlencode({
'username': user.username,
}),
mutable=True,
)
self.cleaned_data = {
'username': user.username,
'org': '',
'mobile': None,
'filter_': None,
}
def test_basic(self):
self.assert_valid(self.cleaned_data)
def test_org(self):
org_value = 'test org name'
self.form_data['org'] = org_value
self.cleaned_data['org'] = org_value
self.assert_valid(self.cleaned_data)
@ddt.data(
*product(
[('mobile', 'mobile_available')],
[(True, True), (False, False), ('1', True), ('0', False), (None, None)],
)
)
@ddt.unpack
def test_filter(self, param_field_name, param_field_value):
param_name, field_name = param_field_name
param_value, field_value = param_field_value
self.form_data[param_name] = param_value
self.cleaned_data[param_name] = field_value
if field_value is not None:
self.cleaned_data['filter_'] = {field_name: field_value}
self.assert_valid(self.cleaned_data)
class TestCourseDetailGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase):
"""
Tests for CourseDetailGetForm
"""
FORM_CLASS = CourseDetailGetForm
@classmethod
def setUpClass(cls):
super(TestCourseDetailGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseDetailGetForm, self).setUp()
self.student = UserFactory.create()
self.set_up_data(self.student)
def set_up_data(self, user):
"""
Sets up the initial form data and the expected clean data.
"""
self.initial = {'requesting_user': user}
self.form_data = QueryDict(
urlencode({
'username': user.username,
'course_key': unicode(self.course.id),
}),
mutable=True,
)
self.cleaned_data = {
'username': user.username,
'course_key': self.course.id,
}
def test_basic(self):
self.assert_valid(self.cleaned_data)
#-- course key --#
def test_no_course_key_param(self):
self.form_data.pop('course_key')
self.assert_error('course_key', "This field is required.")
def test_invalid_course_key(self):
self.form_data['course_key'] = 'invalid_course_key'
self.assert_error('course_key', "'invalid_course_key' is not a valid course key.")
| agpl-3.0 |
demiangomez/Parallel.GAMIT | classes/pyETM.py | 1 | 109441 | # -*- coding: utf-8 -*-
"""
Project: Parallel.Archive
Date: 3/3/17 11:27 AM
Author: Demian D. Gomez
"""
import numpy as np
import pyStationInfo
import pyDate
from numpy import sin
from numpy import cos
from numpy import pi
from scipy.stats import chi2
import pyEvents
from zlib import crc32
from Utils import ct2lg
from Utils import lg2ct
from Utils import rotlg2ct
from os.path import getmtime
from itertools import repeat
from pyBunch import Bunch
from pprint import pprint
import traceback
import warnings
import sys
import os
from time import time
from matplotlib.widgets import Button
import pg
import matplotlib
from io import BytesIO
import base64
import logging
from logging import INFO, ERROR, WARNING, DEBUG, StreamHandler, Formatter
if 'DISPLAY' in os.environ.keys():
if not os.environ['DISPLAY']:
matplotlib.use('Agg')
else:
matplotlib.use('Agg')
language = {
'eng': {
"station": "Station",
"north": "North",
"east": "East",
"up": "Up",
"table_title": "Year Day Relx [mm] Mag",
"periodic": "Periodic amp",
"velocity": "Velocity",
"from_model": "from model",
"acceleration": "Acceleration",
"position": "Ref. Position",
"completion": "Completion",
"other": "other polynomial terms",
"not_enough": "Not enough solutions to fit an ETM.",
"table_too_long": "Table too long to print!",
"frequency": "Frequency",
"N residuals": "N Residuals",
"E residuals": "E Residuals",
"U residuals": "U Residuals",
"histogram plot": "Histogram",
"residual plot": "Residual Plot"
},
'spa': {
"station": u"Estación",
"north": u"Norte",
"east": u"Este",
"up": u"Arriba",
"table_title": u"Año Día Relx [mm] Mag",
"periodic": u"Amp. Periódica",
"velocity": u"Velocidad",
"from_model": u"de modelo",
"acceleration": u"Aceleración",
"position": u"Posición de ref.",
"completion": u"Completitud",
"other": u"otros términos polinómicos",
"not_enough": u"No hay suficientes soluciones para ajustar trayectorias.",
"table_too_long": u"Tabla demasiado larga!",
"frequency": u"Frecuencia",
"N residuals": u"Residuos N",
"E residuals": u"Residuos E",
"U residuals": u"Residuos U",
"histogram plot": u"Histograma",
"residual plot": u"Gráfico de Residuos"
}}
defined = 'LANG' in globals()
if not defined:
LANG = 'eng'
# logger information and setup
logger = logging.getLogger('pyETM')
stream = StreamHandler()
stream.setFormatter(Formatter(' -- %(message)s'))
logger.addHandler(stream)
def tic():
global tt
tt = time()
def toc(text):
global tt
print text + ': ' + str(time() - tt)
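# Illustrative usage (not in the original source):
#   tic()           # start the module-level timer
#   do_work()       # hypothetical timed block
#   toc('do_work')  # prints 'do_work: <elapsed seconds>'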
LIMIT = 2.5
type_dict = {-1: 'UNDETERMINED',
1: 'GENERIC_JUMP',
2: 'ANTENNA_CHANGE',
5: 'REFERENCE_FRAME_JUMP',
10: 'CO_SEISMIC_JUMP_DECAY',
15: 'CO_SEISMIC_JUMP',
20: 'CO_SEISMIC_DECAY'}
# unknown jump
UNDETERMINED = -1
# no effect: display purposes
GENERIC_JUMP = 1
# antenna change jump
ANTENNA_CHANGE = 2
# reference frame jump
REFERENCE_FRAME_JUMP = 5
# co-seismic jump and decay
CO_SEISMIC_JUMP_DECAY = 10
# co-seismic jump only, no decay
CO_SEISMIC_JUMP = 15
# co-seismic decay only
CO_SEISMIC_DECAY = 20
EQ_MIN_DAYS = 15
JP_MIN_DAYS = 5
DEFAULT_RELAXATION = np.array([0.5])
DEFAULT_POL_TERMS = 2
DEFAULT_FREQUENCIES = np.array((1/365.25, 1/(365.25/2))) # (1 yr, 6 months) expressed in 1/days (one year = 365.25)
SIGMA_FLOOR_H = 0.10
SIGMA_FLOOR_V = 0.15
ESTIMATION = 0
DATABASE = 1
VERSION = '1.2.1'
class pyETMException(Exception):
def __init__(self, value):
self.value = value
self.event = pyEvents.Event(Description=value, EventType='error')
def __str__(self):
return str(self.value)
class pyETMException_NoDesignMatrix(pyETMException):
pass
def distance(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1 = lon1*pi/180
lat1 = lat1*pi/180
lon2 = lon2*pi/180
lat2 = lat2*pi/180
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6371 * c
return km
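# Illustrative check (not in the original source): one degree of longitude
# along the equator spans ~111 km on a 6371 km sphere, so
#   distance(0.0, 0.0, 1.0, 0.0)  # -> ~111.19 (km)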
def to_postgres(dictionary):
if isinstance(dictionary, dict):
for key, val in dictionary.items():
if isinstance(val, np.ndarray):
dictionary[key] = str(val.flatten().tolist()).replace('[', '{').replace(']', '}')
else:
dictionary = str(dictionary.flatten().tolist()).replace('[', '{').replace(']', '}')
return dictionary
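# Illustrative usage (not in the original source): numpy values are flattened
# into PostgreSQL array literals, e.g.
#   to_postgres({'params': np.array([[1.0], [2.0]])})  # -> {'params': '{1.0, 2.0}'}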
def to_list(dictionary):
for key, val in dictionary.items():
if isinstance(val, np.ndarray):
dictionary[key] = val.tolist()
if isinstance(val, pyDate.datetime):
dictionary[key] = val.strftime('%Y-%m-%d %H:%M:%S')
return dictionary
class PppSoln(object):
""""class to extract the PPP solutions from the database"""
def __init__(self, cnn, NetworkCode, StationCode):
self.NetworkCode = NetworkCode
self.StationCode = StationCode
self.hash = 0
self.type = 'ppp'
self.stack_name = 'ppp'
# get the station from the stations table
stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = stn.dictresult()[0]
if stn['lat'] is not None:
self.lat = np.array([float(stn['lat'])])
self.lon = np.array([float(stn['lon'])])
self.height = np.array([float(stn['height'])])
self.auto_x = np.array([float(stn['auto_x'])])
self.auto_y = np.array([float(stn['auto_y'])])
self.auto_z = np.array([float(stn['auto_z'])])
x = np.array([float(stn['auto_x'])])
y = np.array([float(stn['auto_y'])])
z = np.array([float(stn['auto_z'])])
if stn['max_dist'] is not None:
self.max_dist = stn['max_dist']
else:
self.max_dist = 20
# load all the PPP coordinates available for this station
        # exclude ppp solutions in the exclude table and any solution that is more than max_dist meters
        # from the station's reference coordinate (auto_x, auto_y, auto_z)
self.excluded = cnn.query_float('SELECT "Year", "DOY" FROM ppp_soln_excl '
'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
self.table = cnn.query_float(
'SELECT "X", "Y", "Z", "Year", "DOY" FROM ppp_soln p1 '
'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\' ORDER BY "Year", "DOY"'
% (NetworkCode, StationCode))
self.table = [item for item in self.table
if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) <=
self.max_dist and item[3:] not in self.excluded]
self.blunders = [item for item in self.table
if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) >
self.max_dist and item[3:] not in self.excluded]
self.solutions = len(self.table)
self.ts_blu = np.array([pyDate.Date(year=item[3], doy=item[4]).fyear for item in self.blunders])
if self.solutions >= 1:
a = np.array(self.table)
self.x = a[:, 0]
self.y = a[:, 1]
self.z = a[:, 2]
self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[:, 3:5]])
self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[:, 3:5]])
self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[:, 3:5]]
# continuous time vector for plots
ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
self.mjds = ts
self.ts = np.array([pyDate.Date(mjd=tts).fyear for tts in ts])
else:
if len(self.blunders) >= 1:
                    # blunder rows only hold X, Y, Z, Year, DOY (indices 0-4),
                    # so compute the distance to the station coordinate explicitly
                    dd = np.sqrt(np.square(np.array([item[0] for item in self.blunders]) - x) +
                                 np.square(np.array([item[1] for item in self.blunders]) - y) +
                                 np.square(np.array([item[2] for item in self.blunders]) - z))
                    raise pyETMException('No viable PPP solutions available for %s.%s (all blunders!)\n'
                                         '    -> min distance to station coordinate is %.1f meters'
                                         % (NetworkCode, StationCode, dd.min()))
else:
raise pyETMException('No PPP solutions available for %s.%s' % (NetworkCode, StationCode))
# get a list of the epochs with files but no solutions.
# This will be shown in the outliers plot as a special marker
rnx = cnn.query(
'SELECT r."ObservationFYear" FROM rinex_proc as r '
'LEFT JOIN ppp_soln as p ON '
'r."NetworkCode" = p."NetworkCode" AND '
'r."StationCode" = p."StationCode" AND '
'r."ObservationYear" = p."Year" AND '
'r."ObservationDOY" = p."DOY"'
'WHERE r."NetworkCode" = \'%s\' AND r."StationCode" = \'%s\' AND '
'p."NetworkCode" IS NULL' % (NetworkCode, StationCode))
self.rnx_no_ppp = rnx.getresult()
self.ts_ns = np.array([item for item in self.rnx_no_ppp])
self.completion = 100. - float(len(self.ts_ns)) / float(len(self.ts_ns) + len(self.t)) * 100.
ppp_hash = cnn.query_float('SELECT sum(hash) FROM ppp_soln p1 '
'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\''
% (NetworkCode, StationCode))
self.hash = crc32(str(len(self.t) + len(self.blunders)) + ' ' + str(self.auto_x) + str(self.auto_y) +
str(self.auto_z) + str(ts[0]) + ' ' + str(ts[-1]) + ' ' + str(ppp_hash[0][0]) + VERSION)
else:
raise pyETMException('Station %s.%s has no valid metadata in the stations table.'
% (NetworkCode, StationCode))
class GamitSoln(object):
""""class to extract the GAMIT polyhedrons from the database"""
def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name):
self.NetworkCode = NetworkCode
self.StationCode = StationCode
self.stack_name = stack_name
self.hash = 0
self.type = 'gamit'
# get the station from the stations table
stn = cnn.query_float('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode), as_dict=True)[0]
if stn['lat'] is not None:
self.lat = np.array([float(stn['lat'])])
self.lon = np.array([float(stn['lon'])])
self.height = np.array([stn['height']])
self.auto_x = np.array([float(stn['auto_x'])])
self.auto_y = np.array([float(stn['auto_y'])])
self.auto_z = np.array([float(stn['auto_z'])])
if stn['max_dist'] is not None:
self.max_dist = stn['max_dist']
else:
self.max_dist = 20
self.solutions = len(polyhedrons)
# blunders
self.blunders = []
self.ts_blu = np.array([])
if self.solutions >= 1:
a = np.array(polyhedrons, dtype=float)
if np.sqrt(np.sum(np.square(a[0, 0:3]))) > 6.3e3:
# coordinates given in XYZ (geocentric norm is much larger than 6.3e3)
nb = np.sqrt(np.sum(
np.square(a[:, 0:3] - np.array([stn['auto_x'], stn['auto_y'], stn['auto_z']])), axis=1)) \
<= self.max_dist
else:
# coordinates are differences
nb = np.sqrt(np.sum(np.square(a[:, 0:3]), axis=1)) <= self.max_dist
if np.any(nb):
self.x = a[nb, 0]
self.y = a[nb, 1]
self.z = a[nb, 2]
self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[nb, 3:5]])
self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[nb, 3:5]])
self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[nb, 3:5]]
# continuous time vector for plots
ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
self.mjds = ts
self.ts = np.array([pyDate.Date(mjd=tts).fyear for tts in ts])
else:
dd = np.sqrt(np.sum(
np.square(a[:, 0:3] - np.array([stn['auto_x'], stn['auto_y'], stn['auto_z']])), axis=1))
raise pyETMException('No viable GAMIT solutions available for %s.%s (all blunders!)\n'
' -> min distance to station coordinate is %.1f meters'
% (NetworkCode, StationCode, dd.min()))
else:
raise pyETMException('No GAMIT polyhedrons vertices available for %s.%s' % (NetworkCode, StationCode))
# get a list of the epochs with files but no solutions.
# This will be shown in the outliers plot as a special marker
rnx = cnn.query(
'SELECT r.* FROM rinex_proc as r '
'LEFT JOIN stacks as p ON '
'r."NetworkCode" = p."NetworkCode" AND '
'r."StationCode" = p."StationCode" AND '
'r."ObservationYear" = p."Year" AND '
'r."ObservationDOY" = p."DOY" AND '
'p."name" = \'%s\''
'WHERE r."NetworkCode" = \'%s\' AND r."StationCode" = \'%s\' AND '
'p."NetworkCode" IS NULL' % (stack_name, NetworkCode, StationCode))
self.rnx_no_ppp = rnx.dictresult()
self.ts_ns = np.array([float(item['ObservationFYear']) for item in self.rnx_no_ppp])
self.completion = 100. - float(len(self.ts_ns)) / float(len(self.ts_ns) + len(self.t)) * 100.
self.hash = crc32(str(len(self.t) + len(self.blunders)) + ' ' + str(ts[0]) + ' ' + str(ts[-1]) + VERSION)
else:
raise pyETMException('Station %s.%s has no valid metadata in the stations table.'
% (NetworkCode, StationCode))
class ListSoln(GamitSoln):
""""class to extract the polyhedrons from a list"""
def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name='file-unknown'):
super(ListSoln, self).__init__(cnn=cnn, polyhedrons=polyhedrons, NetworkCode=NetworkCode,
StationCode=StationCode, stack_name=stack_name)
self.rnx_no_ppp = []
class JumpTable:
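"""Container that collects the earthquake and generic jumps for a station and
keeps the resulting table free of incompatible (near-duplicate) jumps."""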
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitEarthquakes=True, FitGenericJumps=True):
self.table = []
# get earthquakes for this station
self.earthquakes = Earthquakes(cnn, NetworkCode, StationCode, soln, t, FitEarthquakes)
self.generic_jumps = GenericJumps(cnn, NetworkCode, StationCode, soln, t, FitGenericJumps)
jumps = self.earthquakes.table + self.generic_jumps.table
jumps.sort()
# add the relevant jumps, make sure none are incompatible
for jump in jumps:
self.insert_jump(jump)
# verify last jump to make sure there's enough data
if len(self.table) > 0:
jump = None
# find last active jump
for j in self.table[-1::-1]:
# find the previous active jump
if j.fit:
jump = j
break
if jump:
dt = np.max(t[jump.design[:, -1] != 0]) - np.min(t[jump.design[:, -1] != 0])
if (jump.p.jump_type == CO_SEISMIC_JUMP_DECAY and
(dt < 1 and np.count_nonzero(jump.design[:, -1]) / 365.25 < 0.5)):
# was a jump and decay, leave the jump
jump.p.jump_type = CO_SEISMIC_JUMP
jump.param_count -= jump.nr # subtract from param count the number of relaxations
jump.p.params = np.zeros((3, 1))
jump.p.sigmas = np.zeros((3, 1))
# reevaluate the design matrix!
jump.design = jump.eval(t)
jump.rehash()
# for j in self.table:
# print j
self.constrains = np.array([])
def param_count(self):
return sum([jump.param_count for jump in self.table if jump.fit])
def insert_jump(self, jump):
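"""Append 'jump' to the table, deactivating whichever of the new jump or the
previous active jump loses the equivalence test (see Jump.__eq__); the new
jump is always appended so it still shows up in the printed jump table."""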
if len(self.table) == 0:
self.table.append(jump)
else:
# take last jump and compare to adding jump
jj = None
for j in self.table[-1::-1]:
# find the previous active jump
if j.fit:
jj = j
break
if not jj:
# no active jumps in the table!
self.table.append(jump)
return
if jump.fit:
# this operation determines if jumps are equivalent
# result is true if equivalent, decision is which one survives
result, decision = jj.__eq__(jump)
if result:
# jumps are equivalent
# decision branches:
# 1) decision == jump, remove previous; add jump
# 2) decision == jj , do not add jump (i.e. do nothing)
if decision is jump:
jj.remove_from_fit()
else:
jump.remove_from_fit()
self.table.append(jump)
def get_design_ts(self, t):
# build the design-matrix columns for the jump table; any jump whose column
# would make the matrix (near) singular is removed from the fit below
A = np.array([])
for jump in self.table:
if jump.fit:
a = jump.eval(t)
if a.size:
if A.size:
# if A is not empty, verify that this jump will not make the matrix singular
tA = np.column_stack((A, a))
# getting the condition number might trigger divide_zero warning => turn off
np.seterr(divide='ignore', invalid='ignore')
if np.linalg.cond(tA) < 1e10:
# adding this jumps doesn't make the matrix singular
A = tA
else:
# if matrix becomes singular, remove from fit!
jump.remove_from_fit()
warnings.warn('%s had to be removed due to high condition number' % str(jump))
else:
A = a
return A
def load_parameters(self, params, sigmas):
for jump in self.table:
if jump.fit:
jump.load_parameters(params=params, sigmas=sigmas)
def print_parameters(self):
output_n = [language[LANG]['table_title']]
output_e = [language[LANG]['table_title']]
output_u = [language[LANG]['table_title']]
for jump in self.table:
# relaxation counter
rx = 0
m = ' -' if np.isnan(jump.magnitude) else jump.magnitude
if jump.fit:
for p in range(jump.param_count):
psc = jump.p.params[:, p]
if p == 0 and jump.p.jump_type != CO_SEISMIC_DECAY:
output_n.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[0] * 1000.0,
m, jump.action))
output_e.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[1] * 1000.0,
m, jump.action))
output_u.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[2] * 1000.0,
m, jump.action))
else:
output_n.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[0] * 1000.0, m, jump.action))
output_e.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[1] * 1000.0, m, jump.action))
output_u.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[2] * 1000.0, m, jump.action))
# relaxation counter
rx += 1
else:
for j in range(jump.param_count):
if j == 0 and jump.p.jump_type != CO_SEISMIC_DECAY:
# the only type of jump that does not show the jump is a co-seismic decay
output_n.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
output_e.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
output_u.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
else:
output_n.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
output_e.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
output_u.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
# relaxation counter
rx += 1
if len(output_n) > 22:
output_n = output_n[0:22] + [language[LANG]['table_too_long']]
output_e = output_e[0:22] + [language[LANG]['table_too_long']]
output_u = output_u[0:22] + [language[LANG]['table_too_long']]
return '\n'.join(output_n), '\n'.join(output_e), '\n'.join(output_u)
class EtmFunction(object):
def __init__(self, **kwargs):
self.p = Bunch()
self.p.NetworkCode = kwargs['NetworkCode']
self.p.StationCode = kwargs['StationCode']
self.p.soln = kwargs['soln'].type
self.p.stack = kwargs['soln'].stack_name
self.p.params = np.array([])
self.p.sigmas = np.array([])
self.p.object = ''
self.p.metadata = None
self.p.hash = 0
self.param_count = 0
self.column_index = np.array([])
self.format_str = ''
self.fit = True
def load_parameters(self, **kwargs):
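"""Load this function's parameters and sigmas either from the full LSQ
solution vector (selected via self.column_index) or from a database row
that stores the parameters for this object only."""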
params = kwargs['params']
sigmas = kwargs['sigmas']
if params.ndim == 1:
# parameters coming from the database, reshape
params = params.reshape((3, params.shape[0] // 3))
if sigmas.ndim == 1:
# parameters coming from the database, reshape
sigmas = sigmas.reshape((3, sigmas.shape[0] // 3))
# determine if parameters are coming from the X vector (LSQ) or from the database (solution for self only)
if params.shape[1] > self.param_count:
# X vector
self.p.params = params[:, self.column_index]
self.p.sigmas = sigmas[:, self.column_index]
else:
# database (solution for self only; no need for column_index)
self.p.params = params
self.p.sigmas = sigmas
class Jump(EtmFunction):
"""
generic jump (mechanic jump, frame change, etc) class
:argument NetworkCode
:argument StationCode
"""
def __init__(self, NetworkCode, StationCode, soln, t, date, metadata, dtype=GENERIC_JUMP, action='A', fit=True):
super(Jump, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
# in the future, can load parameters from the db
self.p.object = 'jump'
# define initial state variables
self.date = date
self.p.jump_date = date.datetime()
self.p.metadata = metadata
self.p.jump_type = dtype
# new property to identify manually added (or removed) jumps
self.action = action
# new property indicating if jump should be adjusted or not
self.fit = fit
# add the magnitude property to allow transformation from CO_SEISMIC_JUMP_DECAY to CO_SEISMIC_JUMP and still
# print the magnitude of the event in the jump table
self.magnitude = np.nan
# the param count of a jump is one!
self.param_count = 1
if self.fit:
# evaluate only if the jump is not flagged as NO EFFECT
self.design = Jump.eval(self, t)
else:
self.design = np.array([])
if not np.any(self.design) or np.all(self.design):
# a valid jump has only some rows == 1 in its design column;
# all rows == 1 (or none) would produce a singular matrix
self.design = np.array([])
self.fit = False
if dtype not in (CO_SEISMIC_JUMP, CO_SEISMIC_DECAY, CO_SEISMIC_JUMP_DECAY):
logger.info('Mechanical Jump -> Adding jump on %s type: %s; Action: %s; Fit: %s'
% (self.date.yyyyddd(), type_dict[dtype], action, str(self.fit)))
Jump.rehash(self)
def rehash(self):
self.p.hash = crc32(str(self.date) + str(self.fit) + VERSION)
def remove_from_fit(self):
# this method will make this jump type = NO_EFFECT and adjust its params
self.fit = False
self.design = np.array([])
self.rehash()
def eval(self, t):
# given a time vector t, return the design matrix column vector(s)
if not self.fit:
return np.array([])
ht = np.zeros((t.shape[0], 1))
ht[t > self.date.fyear] = 1.
return ht
def load_parameters(self, **kwargs):
if self.fit:
EtmFunction.load_parameters(self, **kwargs)
def __eq__(self, jump):
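"""Compare two jumps for 'equivalence' (too close together for the data to
separate them). Returns a tuple (result, decision): result is True when the
jumps are equivalent and decision is the jump that should survive."""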
if not isinstance(jump, Jump):
raise pyETMException('type: ' + str(type(jump)) + ' invalid. Can compare two Jump objects')
if not self.fit and jump.fit:
# if comparing to a self that has NO_EFFECT, remove and keep jump
return True, jump
elif self.fit and not jump.fit:
# if comparing against a jump that has NO_EFFECT, remove jump keep self
return True, self
elif not self.fit and not jump.fit:
# no jump has an effect, return None. This will be interpreted as False (if not result)
return None, None
# if we got here, then both jumps have fit == True
# compare two jumps together and make sure they will not generate a singular (or near singular) system of eq
c = np.sum(np.logical_xor(self.design[:, 0], jump.design[:, 0]))
dt = jump.date - self.date
# print ' ', jump.date, self.date, dt, c
if self.p.jump_type >= 10 and jump.p.jump_type >= 10:
# jump type >= 10 => co-seismic jump
# if self is co-seismic and the next jump is also co-seismic, keep both only
# when the data can constrain them (enough shared epochs: c >= param_count + 1
# and, for jumps less than a year apart, at least ~10% of a year of data);
# otherwise keep the event with the larger magnitude
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
if self.magnitude < jump.magnitude:
return True, jump
else:
return True, self
else:
return False, None
elif self.p.jump_type >= 10 and 0 < jump.p.jump_type < 10:
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
# cannot constrain both the co-seismic jump and the generic jump that follows it: remove the generic jump
return True, self
else:
return False, None
elif 0 < self.p.jump_type < 10 and jump.p.jump_type >= 10:
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
# a generic jump too close before an earthquake jump cannot be constrained independently: the co-seismic jump prevails
return True, jump
else:
return False, None
elif 0 < self.p.jump_type < 10 and 0 < jump.p.jump_type < 10:
# two generic jumps. As long as they can be constrained, we are fine
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
return True, jump
else:
return False, None
def __str__(self):
return 'date=' + str(self.date) + ', type=' + type_dict[self.p.jump_type] + ', metadata="' + self.p.metadata + \
'", action="' + str(self.action) + '", fit=' + str(self.fit)
def __repr__(self):
return 'pyPPPETM.Jump(' + str(self) + ')'
def __lt__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear < jump.date.fyear
def __le__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear <= jump.date.fyear
def __gt__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear > jump.date.fyear
def __ge__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear >= jump.date.fyear
def __hash__(self):
# to make the object hashable
return hash(self.date.fyear)
class CoSeisJump(Jump):
def __init__(self, NetworkCode, StationCode, soln, t, date, relaxation, metadata,
dtype=CO_SEISMIC_JUMP_DECAY, magnitude=0., action='A', fit=True):
# super-class initialization
Jump.__init__(self, NetworkCode, StationCode, soln, t, date, metadata, dtype, action, fit)
# if t.min() > date, change to CO_SEISMIC_DECAY
# if jump / decay manually deactivated, fit == False and it's not changed below
if date.fyear < t.min():
self.p.jump_type = CO_SEISMIC_DECAY
else:
self.p.jump_type = dtype
# new feature informs the magnitude of the event in the plot
self.magnitude = magnitude
if not self.fit and fit:
# came back from init with empty design matrix (fit = false) and originally fit was True.
# May be a jump before t.min()
# assign just the decay
self.p.jump_type = CO_SEISMIC_DECAY
# put fit back to original state
self.fit = fit
# if T is an array, it contains the corresponding decays
# otherwise, it is a single decay
if not isinstance(relaxation, np.ndarray):
relaxation = np.array([relaxation])
self.param_count += relaxation.shape[0]
if self.p.jump_type == CO_SEISMIC_DECAY:
# if CO_SEISMIC_DECAY, subtract one from parameters
self.param_count -= 1
self.nr = relaxation.shape[0]
self.p.relaxation = relaxation
if self.fit:
self.design = self.eval(t)
else:
self.design = np.array([])
logger.info('Geophysical Jump -> Adding jump on %s type: %s; Mag: %.1f; Action: %s; Fit: %s'
% (self.date.yyyyddd(), type_dict[dtype], magnitude, action, str(self.fit)))
self.rehash()
def rehash(self):
# co-seismic jump already has the version hash value from Jump object
self.p.hash = crc32(str(self.date) + str(self.fit) + str(self.param_count) + str(self.p.jump_type) +
str(self.p.relaxation) + str(self.fit) + VERSION)
def eval(self, t):
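"""Return the design-matrix columns for this event: the Heaviside step from
Jump.eval plus, for each relaxation time T, a logarithmic transient
log10(1 + (t - t_jump) / T) evaluated for t > t_jump."""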
ht = Jump.eval(self, t)
# if there is nothing in ht, then there is no expected output, return none
if not np.any(ht):
return np.array([])
# if it was determined that this is just a co-seismic jump (no decay), return ht
if self.p.jump_type == CO_SEISMIC_JUMP:
return ht
# support more than one decay
hl = np.zeros((t.shape[0], self.nr))
for i, T in enumerate(self.p.relaxation):
hl[t > self.date.fyear, i] = np.log10(1. + (t[t > self.date.fyear] - self.date.fyear) / T)
# if it's both jump and decay, return ht + hl
if np.any(hl) and self.p.jump_type == CO_SEISMIC_JUMP_DECAY:
return np.column_stack((ht, hl))
# if decay only, return hl
elif np.any(hl) and self.p.jump_type == CO_SEISMIC_DECAY:
return hl
def __str__(self):
return Jump.__str__(self) + ', relax=' + str(self.p.relaxation)
def __repr__(self):
return 'pyPPPETM.CoSeisJump(' + str(self) + ')'
class Earthquakes:
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitEarthquakes=True):
self.StationCode = StationCode
self.NetworkCode = NetworkCode
# station location
stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = stn.dictresult()[0]
# load metadata
lat = float(stn['lat'])
lon = float(stn['lon'])
# establish the limit dates. Ignore jumps before 5 years from the earthquake
# sdate = pyDate.Date(fyear=t.min() - 5)
# DDG 30/04/2020: now do not treat the earthquakes before the start date
# the same as those happening after the start date
sdate = pyDate.Date(fyear=t.min())
edate = pyDate.Date(fyear=t.max())
# get the earthquakes based on Mike's expression
# earthquakes before the start date: only magnitude 7+
jumps = cnn.query_float('SELECT * FROM earthquakes '
'WHERE date BETWEEN \'%s\' AND \'%s\' UNION '
'SELECT * FROM earthquakes '
'WHERE date BETWEEN \'%s\' AND \'%s\' AND mag >= 7 '
'ORDER BY date'
% (sdate.yyyymmdd(), edate.yyyymmdd(),
pyDate.Date(fyear=t.min() - 5).yyyymmdd(), sdate.yyyymmdd()), as_dict=True)
# check if data range returned any jumps
if jumps and FitEarthquakes:
eq = [[float(jump['lat']), float(jump['lon']), float(jump['mag']),
int(jump['date'].year), int(jump['date'].month), int(jump['date'].day),
int(jump['date'].hour), int(jump['date'].minute), int(jump['date'].second)] for jump in jumps]
eq = np.array(list(eq))
dist = distance(lon, lat, eq[:, 1], eq[:, 0])
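# empirical screening value combining epicentral distance and magnitude
# ("Mike's expression" mentioned above); only events with m > 0 are treated
# as candidate jumps for this station (see the eq[m > 0, :] filter below)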
m = -0.8717 * (np.log10(dist) - 2.25) + 0.4901 * (eq[:, 2] - 6.6928)
# build the earthquake jump table
# remove events that happened on the same day
eq_jumps = list(set((float(eqs[2]), pyDate.Date(year=int(eqs[3]), month=int(eqs[4]), day=int(eqs[5]),
hour=int(eqs[6]), minute=int(eqs[7]), second=int(eqs[8])))
for eqs in eq[m > 0, :]))
eq_jumps.sort(key=lambda x: (x[1], -x[0]))
# open the jumps table
jp = cnn.query_float('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type <> 0 AND object = \'jump\''
% (NetworkCode, StationCode, soln.type), as_dict=True)
# start by collapsing all earthquakes for the same day.
# Do not allow more than one earthquake on the same day
f_jumps = []
next_date = None
for mag, date in eq_jumps:
# jumps are analyzed in windows that are EQ_MIN_DAYS long
# a date should not be analyzed if it's < next_date
if next_date is not None:
if date < next_date:
continue
# obtain jumps in a EQ_MIN_DAYS window
jumps = [(m, d) for m, d in eq_jumps if date <= d < date + EQ_MIN_DAYS]
if len(jumps) > 1:
# if more than one jump, get the max magnitude
mmag = max([m for m, _ in jumps])
# only keep the earthquake with the largest magnitude
for m, d in jumps:
table = [j['action'] for j in jp if j['Year'] == d.year and j['DOY'] == d.doy]
# get a different relaxation for this date
relax = [j['relaxation'] for j in jp if j['Year'] == d.year and j['DOY'] == d.doy]
if relax:
if relax[0] is not None:
relaxation = np.array(relax[0])
else:
relaxation = DEFAULT_RELAXATION
else:
relaxation = DEFAULT_RELAXATION
# if present in jump table, with either + or -, don't use default decay
if m == mmag and '-' not in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d, relaxation,
'mag=%.1f' % m, magnitude=m, action='+' if '+' in table else 'A')]
# once the jump was added, exit for loop
break
else:
# add only if in jump list with a '+'
if '+' in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d,
relaxation, 'mag=%.1f' % m, magnitude=m, action='+')]
# once the jump was added, exit for loop
break
else:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d,
relaxation, 'mag=%.1f' % m, action='-', fit=False)]
else:
# add, unless marked in table with '-'
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
# get a different relaxation for this date
relax = [j['relaxation'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
if relax:
if relax[0] is not None:
relaxation = np.array(relax[0])
else:
relaxation = DEFAULT_RELAXATION
else:
relaxation = DEFAULT_RELAXATION
if '-' not in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, date,
relaxation, 'mag=%.1f' % mag, magnitude=mag,
action='+' if '+' in table else 'A')]
else:
# add it with NO_EFFECT for display purposes
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, date,
relaxation, 'mag=%.1f' % mag, magnitude=mag, action='-', fit=False)]
next_date = date + EQ_MIN_DAYS
# final jump table
self.table = f_jumps
else:
self.table = []
class GenericJumps(object):
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitGenericJumps=True):
self.solution_type = soln.type
self.table = []
if t.size >= 2:
# analyze if it is possible to add the jumps (based on the available data)
wt = np.sort(np.unique(t - np.fix(t)))
# analyze the gaps in the data
dt = np.diff(wt)
# max dt (internal)
dtmax = np.max(dt)
# dt wrapped around
dt_interyr = 1 - wt[-1] + wt[0]
if dt_interyr > dtmax:
dtmax = dt_interyr
if dtmax <= 0.2465 and FitGenericJumps:
# put jumps in
self.add_metadata_jumps = True
else:
# no jumps
self.add_metadata_jumps = False
else:
self.add_metadata_jumps = False
# open the jumps table
jp = cnn.query('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type = 0 AND object = \'jump\''
% (NetworkCode, StationCode, self.solution_type))
jp = jp.dictresult()
# get station information
self.stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode)
for stninfo in self.stninfo.records[1:]:
date = stninfo['DateStart']
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
# add to list only if:
# 1) add_meta = True AND there is no '-' OR
# 2) add_meta = False AND there is a '+'
self.table.append(Jump(NetworkCode, StationCode, soln, t, date,
'Ant-Rec: %s-%s' % (stninfo['AntennaCode'], stninfo['ReceiverCode']),
dtype=ANTENNA_CHANGE,
action=table[0] if table else 'A',
fit=True if '+' in table or (self.add_metadata_jumps and '-' not in table)
else False))
# frame changes if ppp
if self.solution_type == 'ppp':
frames = cnn.query(
'SELECT distinct on ("ReferenceFrame") "ReferenceFrame", "Year", "DOY" from ppp_soln WHERE '
'"NetworkCode" = \'%s\' AND "StationCode" = \'%s\' order by "ReferenceFrame", "Year", "DOY"' %
(NetworkCode, StationCode))
frames = frames.dictresult()
if len(frames) > 1:
# more than one frame, add a jump
frames.sort(key=lambda k: k['Year'])
for frame in frames[1:]:
date = pyDate.Date(Year=frame['Year'], doy=frame['DOY'])
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
self.table.append(Jump(NetworkCode, StationCode, soln, t, date,
'Frame Change: %s' % frame['ReferenceFrame'],
dtype=REFERENCE_FRAME_JUMP,
action=table[0] if table else 'A',
fit=True if '-' not in table else False))
# now check the jump table to add specific jumps
jp = cnn.query('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type = 0 AND object = \'jump\' '
'AND action = \'+\'' % (NetworkCode, StationCode, self.solution_type))
jp = jp.dictresult()
table = [j.date for j in self.table]
for j in jp:
date = pyDate.Date(Year=j['Year'], doy=j['DOY'])
if date not in table:
self.table.append(Jump(NetworkCode, StationCode, soln, t, date, 'mechanic-jump',
dtype=GENERIC_JUMP, action='+'))
class Periodic(EtmFunction):
""""class to determine the periodic terms to be included in the ETM"""
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitPeriodic=True):
super(Periodic, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
try:
# load the frequencies from the database
etm_param = cnn.get('etm_params',
{'NetworkCode': NetworkCode, 'StationCode': StationCode, 'soln': soln.type,
'object': 'periodic'},
['NetworkCode', 'StationCode', 'soln', 'object'])
self.p.frequencies = np.array([float(p) for p in etm_param['frequencies']])
except pg.DatabaseError:
self.p.frequencies = DEFAULT_FREQUENCIES
self.p.object = 'periodic'
if t.size > 1 and FitPeriodic:
# wrap around the solutions
wt = np.sort(np.unique(t - np.fix(t)))
# analyze the gaps in the data
dt = np.diff(wt)
# max dt (internal)
dtmax = np.max(dt)
# dt wrapped around
dt_interyr = 1 - wt[-1] + wt[0]
if dt_interyr > dtmax:
dtmax = dt_interyr
# save the value of the max wrapped delta time
self.dt_max = dtmax
# get the 50 % of Nyquist for each component (and convert to average fyear)
self.nyquist = ((1 / self.p.frequencies) / 2.) * 0.5 * 1 / 365.25
# frequency count
self.frequency_count = int(np.sum(self.dt_max <= self.nyquist))
# redefine the frequencies vector to accommodate only the frequencies that can be fit
self.p.frequencies = self.p.frequencies[self.dt_max <= self.nyquist]
else:
# no periodic terms
self.frequency_count = 0
self.p.frequencies = np.array([])
self.dt_max = 1 # one year of delta t
logger.info('Periodic -> Frequency count: %i; FitPeriodic: %s' % (self.frequency_count, str(FitPeriodic)))
# build the metadata description for the json string
self.p.metadata = '['
for k in ['n', 'e', 'u']:
self.p.metadata = self.p.metadata + '['
meta = []
for i in ['sin', 'cos']:
for f in (1 / (self.p.frequencies * 365.25)).tolist():
meta.append('%s:%s(%.1f yr)' % (k, i, f))
self.p.metadata = self.p.metadata + ','.join(meta) + '],'
self.p.metadata = self.p.metadata + ']'
self.design = self.get_design_ts(t)
self.param_count = self.frequency_count * 2
# declare the location of the answer (to be filled by Design object)
self.column_index = np.array([])
self.format_str = language[LANG]['periodic'] + ' (' + \
', '.join(['%.1f yr' % i for i in (1 / (self.p.frequencies * 365.25)).tolist()]) + \
') N: %s E: %s U: %s [mm]'
self.p.hash = crc32(str(self.p.frequencies) + VERSION)
def get_design_ts(self, ts):
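"""Build the periodic design columns: one sine and one cosine term per
fitted frequency, i.e. sin(2*pi*f*365.25*ts) and cos(2*pi*f*365.25*ts),
with ts in fractional years and f in cycles per day."""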
# if dtmax < 3 months (90 days = 0.2465), then we can fit the annual
# if dtmax < 1.5 months (45 days = 0.1232), then we can fit the semi-annual too
if self.frequency_count > 0:
f = self.p.frequencies
f = np.tile(f, (ts.shape[0], 1))
As = np.array(sin(2 * pi * f * 365.25 * np.tile(ts[:, np.newaxis], (1, f.shape[1]))))
Ac = np.array(cos(2 * pi * f * 365.25 * np.tile(ts[:, np.newaxis], (1, f.shape[1]))))
A = np.column_stack((As, Ac))
else:
# no periodic terms
A = np.array([])
return A
def print_parameters(self):
n = np.array([])
e = np.array([])
u = np.array([])
for p in np.arange(self.param_count):
psc = self.p.params[:, p]
sn = psc[0]
se = psc[1]
su = psc[2]
n = np.append(n, sn)
e = np.append(e, se)
u = np.append(u, su)
n = n.reshape((2, self.param_count // 2))
e = e.reshape((2, self.param_count // 2))
u = u.reshape((2, self.param_count // 2))
# calculate the amplitude of the components
an = np.sqrt(np.square(n[0, :]) + np.square(n[1, :]))
ae = np.sqrt(np.square(e[0, :]) + np.square(e[1, :]))
au = np.sqrt(np.square(u[0, :]) + np.square(u[1, :]))
return self.format_str % (np.array_str(an * 1000.0, precision=1),
np.array_str(ae * 1000.0, precision=1),
np.array_str(au * 1000.0, precision=1))
class Polynomial(EtmFunction):
""""class to build the linear portion of the design matrix"""
def __init__(self, cnn, NetworkCode, StationCode, soln, t, t_ref=0, interseismic=None):
super(Polynomial, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
# t ref (just the beginning of t vector)
if t_ref == 0:
t_ref = np.min(t)
self.p.object = 'polynomial'
self.p.t_ref = t_ref
self.interseismic = np.zeros((3, t.shape[0]))
if interseismic:
logger.info('Polynomial -> Interseismic velocity provided: removing velocity from fit')
# interseismic model provided, do not fit linear (remove trend)
tt = (t - t_ref)
if type(interseismic) is list:
interseismic = np.array(interseismic)
# convert to np if list is given
for i in range(3):
self.interseismic[i] = tt * interseismic[i]
self.terms = 1
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' (' \
+ language[LANG]['from_model'] + ')' + \
' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr]'.format(*(interseismic * 1000))
self.p.metadata = '[[n:pos, n:vel],[e:pos, e:vel],[u:pos, u:vel]]'
else:
try:
# load the number of terms from the database
etm_param = cnn.get('etm_params',
{'NetworkCode': NetworkCode, 'StationCode': StationCode, 'soln': soln.type,
'object': 'polynomial'},
['NetworkCode', 'StationCode', 'soln', 'object'])
self.terms = int(etm_param['terms'])
except pg.DatabaseError:
self.terms = DEFAULT_POL_TERMS
logger.info('Polynomial -> Fitting %i term(s)' % self.terms)
if self.terms == 1:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]'
self.p.metadata = '[[n:pos],[e:pos],[u:pos]]'
elif self.terms == 2:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr]'
self.p.metadata = '[[n:pos, n:vel],[e:pos, e:vel],[u:pos, u:vel]]'
elif self.terms == 3:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.3f} E: {:.3f} U: {:.3f} [mm/yr]\n' \
+ language[LANG]['acceleration'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr**2]'
self.p.metadata = '[[n:pos, n:vel, n:acc],[e:pos, e:vel, e:acc],[u:pos, u:vel, u:acc]]'
elif self.terms > 3:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.3f} E: {:.3f} U: {:.3f} [mm/yr]\n' \
+ language[LANG]['acceleration'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr**2] + ' \
+ '%i ' % (self.terms - 3) + language[LANG]['other']
self.p.metadata = '[[n:pos, n:vel, n:acc, n:tx...],' \
'[e:pos, e:vel, e:acc, e:tx...],' \
'[u:pos, u:vel, u:acc, u:tx...]]'
self.design = self.get_design_ts(t)
# always first in the list of A, index columns are fixed
self.column_index = np.arange(self.terms)
# param count is the same as terms
self.param_count = self.terms
# save the hash of the object
self.p.hash = crc32(str(self.terms) + VERSION)
def load_parameters(self, params, sigmas, t_ref):
super(Polynomial, self).load_parameters(params=params, sigmas=sigmas)
self.p.t_ref = t_ref
def print_parameters(self, ref_xyz, lat, lon):
params = np.zeros((3,))
for p in np.arange(self.terms):
if p == 0:
params[0], params[1], params[2] = lg2ct(self.p.params[0, 0],
self.p.params[1, 0],
self.p.params[2, 0], lat, lon)
params += ref_xyz.flatten()
elif p > 0:
n = self.p.params[0, p]
e = self.p.params[1, p]
u = self.p.params[2, p]
params = np.append(params, (n*1000, e*1000, u*1000))
return self.format_str.format(*params.tolist())
def get_design_ts(self, ts):
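"""Polynomial design columns: powers (ts - t_ref)**p for p = 0..terms-1,
i.e. position, velocity, acceleration, and any higher-order terms."""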
A = np.zeros((ts.size, self.terms))
for p in np.arange(self.terms):
A[:, p] = np.power(ts - self.p.t_ref, p)
return A
class Design(np.ndarray):
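"""Design matrix assembled as an ndarray subclass: the polynomial columns
come first, followed by one block of columns per active jump and then the
periodic columns. Calling the instance with a new time vector rebuilds the
matrix for that epoch range (e.g. for plotting the continuous model)."""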
def __new__(subtype, Linear, Jumps, Periodic, dtype=float, buffer=None, offset=0, strides=None, order=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
shape = (Linear.design.shape[0], Linear.param_count + Jumps.param_count() + Periodic.param_count)
A = super(Design, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order)
A[:, Linear.column_index] = Linear.design
# determine the column_index for all objects
col_index = Linear.param_count
for jump in Jumps.table:
# save the column index
if jump.fit:
jump.column_index = np.arange(col_index, col_index + jump.param_count)
# assign the portion of the design matrix
A[:, jump.column_index] = jump.design
# increment the col_index
col_index += jump.param_count
Periodic.column_index = np.arange(col_index, col_index + Periodic.param_count)
A[:, Periodic.column_index] = Periodic.design
# save the object list
A.objects = (Linear, Jumps, Periodic)
# save the number of total parameters
A.linear_params = Linear.param_count
A.jump_params = Jumps.param_count()
A.periodic_params = Periodic.param_count
A.params = Linear.param_count + Jumps.param_count() + Periodic.param_count
# save the constrains matrix
A.constrains = Jumps.constrains
# Finally, we must return the newly created object:
return A
def __call__(self, ts=None, constrains=False):
if ts is None:
if constrains:
if self.constrains.size:
A = self.copy()
# resize matrix (use A.resize so that it fills with zeros)
A.resize((self.shape[0] + self.constrains.shape[0], self.shape[1]), refcheck=False)
# apply constrains
A[-self.constrains.shape[0]:, self.jump_params] = self.constrains
return A
else:
return self
else:
return self
else:
A = np.array([])
for obj in self.objects:
tA = obj.get_design_ts(ts)
if A.size:
A = np.column_stack((A, tA)) if tA.size else A
else:
A = tA
return A
def get_l(self, L, constrains=False):
if constrains:
if self.constrains.size:
tL = L.copy()
tL.resize((L.shape[0] + self.constrains.shape[0]), refcheck=False)
return tL
else:
return L
else:
return L
def get_p(self, constrains=False):
# return a weight matrix full of ones with or without the extra elements for the constrains
return np.ones((self.shape[0])) if not constrains else \
np.ones((self.shape[0] + self.constrains.shape[0]))
def remove_constrains(self, v):
# remove the constrains to whatever vector is passed
if self.constrains.size:
return v[0:-self.constrains.shape[0]]
else:
return v
class ETM:
def __init__(self, cnn, soln, no_model=False, FitEarthquakes=True, FitGenericJumps=True, FitPeriodic=True,
interseismic=None):
# to display more verbose warnings
# warnings.showwarning = self.warn_with_traceback
self.C = np.array([])
self.S = np.array([])
self.F = np.array([])
self.R = np.array([])
self.P = np.array([])
self.factor = np.array([])
self.covar = np.zeros((3, 3))
self.A = None
self.param_origin = ESTIMATION
self.soln = soln
self.no_model = no_model
self.FitEarthquakes = FitEarthquakes
self.FitGenericJumps = FitGenericJumps
self.FitPeriodic = FitPeriodic
self.NetworkCode = soln.NetworkCode
self.StationCode = soln.StationCode
logger.info('Creating ETM object for %s.%s' % (self.NetworkCode, self.StationCode))
# save the function objects
self.Linear = Polynomial(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t,
interseismic=interseismic)
self.Periodic = Periodic(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t, FitPeriodic)
self.Jumps = JumpTable(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t,
FitEarthquakes, FitGenericJumps)
# calculate the hash value for this station
# now hash also includes the timestamp of the last time pyETM was modified.
self.hash = soln.hash
# anything less than four is not worth it
if soln.solutions > 4 and not no_model:
# to obtain the parameters
self.A = Design(self.Linear, self.Jumps, self.Periodic)
# check if problem can be solved!
if self.A.shape[1] >= soln.solutions:
self.A = None
return
self.As = self.A(soln.ts)
else:
logger.info('Not enough solutions to calculate ETM (more than 4 required)')
def run_adjustment(self, cnn, l, plotit=False, soln=None):
if self.A is not None:
# try to load the last ETM solution from the database
etm_objects = cnn.query_float('SELECT * FROM etms WHERE "NetworkCode" = \'%s\' '
'AND "StationCode" = \'%s\' AND soln = \'%s\' AND stack = \'%s\''
% (self.NetworkCode, self.StationCode, self.soln.type,
self.soln.stack_name), as_dict=True)
# DDG: Attention: it is not always possible to retrieve the parameters from the database using the hash
# strategy. The jump table is determined and their hash values calculated. The fit attribute goes into the
# hash value. When an unrealistic jump is detected, the jump is removed from the fit and the final
# parameters are saved without this jump. Thus, when loading the object, the jump will be added to fit but
# it will not be present in the database.
db_hash_sum = sum([obj['hash'] for obj in etm_objects])
jumps_hash = sum([o.p.hash for o in self.Jumps.table if o.fit])
ob_hash_sum = self.Periodic.p.hash + self.Linear.p.hash + self.hash + jumps_hash
cn_object_sum = len([o.p.hash for o in self.Jumps.table if o.fit]) + 2
# -1 to account for the var_factor entry
if len(etm_objects) - 1 == cn_object_sum and db_hash_sum == ob_hash_sum:
logger.info('ETM -> Loading parameters from database (db hash %i; ob hash %i)'
% (db_hash_sum, ob_hash_sum))
# load the parameters from th db
self.load_parameters(etm_objects, l)
# signal the outside world that the parameters were loaded from the database (no need to save them)
self.param_origin = DATABASE
else:
logger.info('ETM -> Estimating parameters (db hash %i; ob hash %i)'
% (db_hash_sum, ob_hash_sum))
# signal the outside world that the parameters were estimated (and need to be saved)
self.param_origin = ESTIMATION
# purge table and recompute
cnn.query('DELETE FROM etms WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' AND soln = \'%s\' AND stack = \'%s\''
% (self.NetworkCode, self.StationCode, self.soln.type, self.soln.stack_name))
if self.soln.type == 'dra':
# if the solution is of type 'dra', delete the excluded solutions
cnn.query('DELETE FROM gamit_soln_excl WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\'' % (self.NetworkCode, self.StationCode))
# use the default parameters from the objects
t_ref = self.Linear.p.t_ref
j = 0
while j < 10:
# reset the flag on every pass so the loop exits once no unrealistic jumps remain
do_again = False
c = []
f = []
s = []
r = []
p = []
factor = []
for i in range(3):
x, sigma, index, residuals, fact, w = self.adjust_lsq(self.A, l[i])
c.append(x)
s.append(sigma)
f.append(index)
r.append(residuals)
factor.append(fact)
p.append(w)
self.C = np.array(c)
self.S = np.array(s)
self.F = np.array(f)
self.R = np.array(r)
self.factor = np.array(factor)
self.P = np.array(p)
# load_parameters to the objects
self.Linear.load_parameters(self.C, self.S, t_ref)
self.Jumps.load_parameters(self.C, self.S)
self.Periodic.load_parameters(params=self.C, sigmas=self.S)
# determine if any jumps are unrealistic
for jump in self.Jumps.table:
if jump.fit and jump.p.jump_type in (CO_SEISMIC_JUMP_DECAY, CO_SEISMIC_DECAY) \
and np.any(np.abs(jump.p.params[:, -jump.nr:]) > 0.5):
# unrealistic, remove
jump.remove_from_fit()
do_again = True
logger.info('ETM -> Unrealistic jump detected (%s : %s), removing and redoing fit'
% (np.array_str(jump.p.params[:, -jump.nr:].flatten(), precision=1),
type_dict[jump.p.jump_type]))
if not do_again:
break
else:
self.A = Design(self.Linear, self.Jumps, self.Periodic)
if soln:
self.As = self.A(soln.ts)
j += 1
# load the covariances using the correlations
self.process_covariance()
if plotit:
self.plot()
else:
logger.info('ETM -> Empty design matrix')
def process_covariance(self):
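"""Assemble the 3x3 NEU variance-covariance matrix from the per-component
variance factors and the correlations of the filtered residuals, forcing
the result to be positive-definite if necessary."""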
cov = np.zeros((3, 1))
# save the covariance between N-E, E-U, N-U
f = self.F[0] * self.F[1] * self.F[2]
# load the covariances using the correlations
cov[0] = np.corrcoef(self.R[0][f], self.R[1][f])[0, 1] * self.factor[0] * self.factor[1]
cov[1] = np.corrcoef(self.R[1][f], self.R[2][f])[0, 1] * self.factor[1] * self.factor[2]
cov[2] = np.corrcoef(self.R[0][f], self.R[2][f])[0, 1] * self.factor[0] * self.factor[2]
# build a variance-covariance matrix
self.covar = np.diag(np.square(self.factor))
self.covar[0, 1] = cov[0]
self.covar[1, 0] = cov[0]
self.covar[2, 1] = cov[1]
self.covar[1, 2] = cov[1]
self.covar[0, 2] = cov[2]
self.covar[2, 0] = cov[2]
if not self.isPD(self.covar):
self.covar = self.nearestPD(self.covar)
def save_excluded_soln(self, cnn):
for date, f, r in zip(self.soln.date, np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]),
np.sqrt(np.sum(np.square(self.R), axis=0))):
if not cnn.query_float('SELECT * FROM gamit_soln_excl WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' AND "Project" = \'%s\' AND "Year" = %i AND "DOY" = %i'
% (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy)) \
and not f:
cnn.query('INSERT INTO gamit_soln_excl ("NetworkCode", "StationCode", "Project", "Year", "DOY", '
'residual) VALUES (\'%s\', \'%s\', \'%s\', %i ,%i, %.4f)'
% (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy, r))
def save_parameters(self, cnn):
# only save the parameters when they've been estimated, not when loaded from database
if self.param_origin == ESTIMATION:
# insert linear parameters
cnn.insert('etms', row=to_postgres(self.Linear.p.toDict()))
# insert jumps
for jump in self.Jumps.table:
if jump.fit:
cnn.insert('etms', row=to_postgres(jump.p.toDict()))
# insert periodic params
cnn.insert('etms', row=to_postgres(self.Periodic.p.toDict()))
# save the variance factors
cnn.query('INSERT INTO etms ("NetworkCode", "StationCode", soln, object, params, hash, stack) VALUES '
'(\'%s\', \'%s\', \'%s\', \'var_factor\', \'%s\', %i, \'%s\')'
% (self.NetworkCode, self.StationCode, self.soln.type, to_postgres(self.factor),
self.hash, self.soln.stack_name))
def plot(self, pngfile=None, t_win=None, residuals=False, plot_missing=True,
ecef=False, plot_outliers=True, fileio=None):
import matplotlib.pyplot as plt
L = self.l * 1000
# definitions
m = []
if ecef:
labels = ('X [mm]', 'Y [mm]', 'Z [mm]')
else:
labels = (language[LANG]['north'] + ' [mm]',
language[LANG]['east'] + ' [mm]',
language[LANG]['up'] + ' [mm]')
# get filtered observations
if self.A is not None:
filt = self.F[0] * self.F[1] * self.F[2]
for i in range(3):
m.append((np.dot(self.As, self.C[i])) * 1000)
else:
filt = np.ones(self.soln.x.shape[0], dtype=bool)
# rotate to NEU
if ecef:
lneu = self.rotate_2xyz(L)
else:
lneu = L
# determine the window of the plot, if requested
if t_win is not None:
if type(t_win) is tuple:
# data range, with possibly a final value
if len(t_win) == 1:
t_win = (t_win[0], self.soln.t.max())
else:
# approximate a day in fyear
t_win = (self.soln.t.max() - t_win/365.25, self.soln.t.max())
# new behaviour: plots the time series even if there is no ETM fit
if self.A is not None:
# create the axis
if plot_outliers:
f, axis = plt.subplots(nrows=3, ncols=2, sharex=True, figsize=(15, 10)) # type: plt.subplots
axis_vect = (axis[0][0], axis[1][0], axis[2][0])
else:
f, axis = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(15, 10)) # type: plt.subplots
axis_vect = (axis[0], axis[1], axis[2])
# rotate modeled ts
if not ecef:
mneu = m
rneu = self.R
fneu = self.factor * 1000
else:
mneu = self.rotate_2xyz(m)
# rotate residuals
rneu = self.rotate_2xyz(self.R)
fneu = np.sqrt(np.diag(self.rotate_sig_cov(covar=self.covar))) * 1000
# ################# FILTERED PLOT #################
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f\n'
'%s\n%s\n'
'NEU wrms [mm]: %5.2f %5.2f %5.2f' %
(self.NetworkCode, self.StationCode, self.soln.stack_name.upper(), self.soln.completion,
self.soln.lat, self.soln.lon,
self.Linear.print_parameters(np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z]),
self.soln.lat, self.soln.lon),
self.Periodic.print_parameters(),
fneu[0], fneu[1], fneu[2]), fontsize=9, family='monospace')
table_n, table_e, table_u = self.Jumps.print_parameters()
tables = (table_n, table_e, table_u)
for i, ax in enumerate(axis_vect):
# plot filtered time series
if not residuals:
ax.plot(self.soln.t[filt], lneu[i][filt], 'ob', markersize=2)
ax.plot(self.soln.ts, mneu[i], 'r')
# error bars
ax.plot(self.soln.ts, mneu[i] - fneu[i] * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, mneu[i] + fneu[i] * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, mneu[i] - fneu[i] * LIMIT, mneu[i] + fneu[i] * LIMIT,
antialiased=True, alpha=0.2)
else:
ax.plot(self.soln.t[filt], rneu[i][filt]*1000, 'ob', markersize=2)
# error bars
ax.plot(self.soln.ts, - np.repeat(fneu[i], self.soln.ts.shape[0]) * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, np.repeat(fneu[i], self.soln.ts.shape[0]) * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, -fneu[i] * LIMIT, fneu[i] * LIMIT, antialiased=True, alpha=0.2)
ax.grid(True)
# labels
ax.set_ylabel(labels[i])
p = ax.get_position()
f.text(0.005, p.y0, tables[i], fontsize=8, family='monospace')
# window data
self.set_lims(t_win, plt, ax)
# plot jumps
self.plot_jumps(ax)
# ################# OUTLIERS PLOT #################
if plot_outliers:
for i, ax in enumerate((axis[0][1], axis[1][1], axis[2][1])):
ax.plot(self.soln.t, lneu[i], 'oc', markersize=2)
ax.plot(self.soln.t[filt], lneu[i][filt], 'ob', markersize=2)
ax.plot(self.soln.ts, mneu[i], 'r')
# error bars
ax.plot(self.soln.ts, mneu[i] - fneu[i] * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, mneu[i] + fneu[i] * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, mneu[i] - fneu[i]*LIMIT, mneu[i] + fneu[i]*LIMIT,
antialiased=True, alpha=0.2)
self.set_lims(t_win, plt, ax)
ax.set_ylabel(labels[i])
ax.grid(True)
if plot_missing:
self.plot_missing_soln(ax)
f.subplots_adjust(left=0.18)
else:
f, axis = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(15, 10)) # type: plt.subplots
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f'
% (self.NetworkCode, self.StationCode, self.soln.type.upper(), self.soln.completion,
self.soln.lat, self.soln.lon) +
'\n' + language[LANG]['not_enough'], fontsize=9, family='monospace')
for i, ax in enumerate((axis[0], axis[1], axis[2])):
ax.plot(self.soln.t, lneu[i], 'ob', markersize=2)
ax.set_ylabel(labels[i])
ax.grid(True)
self.set_lims(t_win, plt, ax)
self.plot_jumps(ax)
if plot_missing:
self.plot_missing_soln(ax)
if pngfile is not None:
plt.savefig(pngfile)
plt.close()
elif fileio is not None:
plt.savefig(fileio, format='png')
# plt.show()
fileio.seek(0) # rewind to beginning of file
plt.close()
return base64.b64encode(fileio.getvalue())
else:
self.f = f
self.picking = False
self.plt = plt
axprev = plt.axes([0.85, 0.01, 0.08, 0.055])
bcut = Button(axprev, 'Add jump', color='red', hovercolor='green')
bcut.on_clicked(self.enable_picking)
plt.show()
plt.close()
def onpick(self, event):
import dbConnection
self.f.canvas.mpl_disconnect(self.cid)
self.picking = False
print('Epoch: %s' % pyDate.Date(fyear=event.xdata).yyyyddd())
jtype = int(input(' -- Enter type of jump (0 = mechanic; 1 = geophysical): '))
if jtype == 1:
relx = input(' -- Enter relaxation (e.g. 0.5, 0.5,0.01): ')
operation = str(raw_input(' -- Enter operation (+, -): '))
print(' >> Jump inserted')
# now insert the jump into the db
cnn = dbConnection.Cnn('gnss_data.cfg')
self.plt.close()
# reinitialize ETM
# wait for 'keep' or 'undo' command
def enable_picking(self, event):
if not self.picking:
print('Entering picking mode')
self.picking = True
self.cid = self.f.canvas.mpl_connect('button_press_event', self.onpick)
else:
print('Disabling picking mode')
self.picking = False
self.f.canvas.mpl_disconnect(self.cid)
def plot_hist(self, pngfile=None):
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
from matplotlib.patches import Ellipse
labels = (language[LANG]['north'] + ' [mm]',
language[LANG]['east'] + ' [mm]',
language[LANG]['up'] + ' [mm]')
if self.A is not None:
filt = self.F[0] * self.F[1] * self.F[2]
f, axis = plt.subplots(nrows=2, ncols=2, figsize=(15, 10)) # type: plt.subplots
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f\n'
'VAR (N E U) : %s\n'
'COV (N-E N-U E-U): %s'
% (self.NetworkCode, self.StationCode, self.soln.type.upper(), self.soln.completion,
self.soln.lat, self.soln.lon, ' '.join(['%10.3e' % i for i in np.diag(self.covar)]),
' '.join(['%10.3e' % i for i in [self.covar[0, 1], self.covar[0, 2], self.covar[1, 2]]])),
fontsize=9, family='monospace')
n = np.sqrt(np.sum(self.R ** 2, axis=0))
N = self.R[0][n <= 0.05] * 1000
E = self.R[1][n <= 0.05] * 1000
U = self.R[2][n <= 0.05] * 1000
# N-E residuals and error ellipse
ax = axis[0][0]
ax.plot(E, N, 'ob', markersize=2)
# ax.plot(E[filt], N[filt], 'ob', markersize=2)
# ax.plot(E[np.logical_not(filt)], N[np.logical_not(filt)], 'oc', markersize=2)
# process the covariance matrix
# copy first so the stored covariance matrix is not modified in place;
# swap N and E variances so the matrix is ordered (E, N), matching the plot axes
c = self.covar[0:2, 0:2].copy()
c[1, 1], c[0, 0] = c[0, 0], c[1, 1]
w, v = np.linalg.eigh(c)
order = w.argsort()[::-1]
w, v = w[order], v[:, order]
theta = np.degrees(np.arctan2(*v[:, 0][::-1]))
ellipse = Ellipse((np.mean(E), np.mean(N)),
width=2. * np.sqrt(w[0]) * 2.5 * 1000,
height=2. * np.sqrt(w[1]) * 2.5 * 1000,
angle=theta,
facecolor='none',
edgecolor='red',
zorder=3,
label=r'$2.5\sigma$')
ax.add_patch(ellipse)
ax.grid(True)
ax.set_ylabel(labels[0])
ax.set_xlabel(labels[1])
ax.set_title(language[LANG]['residual plot'] + ' ' + language[LANG]['north'] + '-' + language[LANG]['east'])
ax.axis('equal')
f.canvas.draw()
ax.legend()
nn = ax.get_ylim()
ee = ax.get_xlim()
# N histogram
ax = axis[0][1]
# (mu, sigma) = norm.fit(N)
n, bins, patches = ax.hist(N, 200, alpha=0.75, facecolor='blue', orientation='horizontal')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(y, bins, 'r--', linewidth=2)
ax.grid(True)
ax.set_xlabel(language[LANG]['frequency'])
ax.set_ylabel(language[LANG]['N residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['north'])
ax.set_ylim(nn)
# E histogram
ax = axis[1][0]
# (mu, sigma) = norm.fit(E)
n, bins, patches = ax.hist(E, 200, alpha=0.75, facecolor='blue')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(bins, y, 'r--', linewidth=2)
ax.grid(True)
ax.set_ylabel(language[LANG]['frequency'])
ax.set_xlabel(language[LANG]['E residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['east'])
ax.set_xlim(ee)
# Up histogram
ax = axis[1][1]
# (mu, sigma) = norm.fit(U)
n, bins, patches = ax.hist(U, 200, alpha=0.75, facecolor='blue')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(bins, y, 'r--', linewidth=2)
ax.grid(True)
ax.set_ylabel(language[LANG]['frequency'])
ax.set_xlabel(language[LANG]['U residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['up'])
#residuals = np.sqrt(np.square(L[0]) + np.square(L[1]) + np.square(L[2])) - \
# np.sqrt(np.square(np.dot(self.A, self.C[0])) + np.square(np.dot(self.A, self.C[1])) +
# np.square(np.dot(self.A, self.C[2])))
#(mu, sigma) = norm.fit(residuals)
#n, bins, patches = plt.hist(residuals, 200, normed=1, alpha=0.75, facecolor='blue')
#y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--', linewidth=2)
#plt.title(r'$\mathrm{Histogram\ of\ residuals (mm):}\ \mu=%.3f,\ \sigma=%.3f$' % (mu*1000, sigma*1000))
#plt.grid(True)
if not pngfile:
plt.show()
plt.close()
else:
plt.savefig(pngfile)
plt.close()
@staticmethod
def autoscale_y(ax, margin=0.1):
"""This function rescales the y-axis based on the data that is visible given the current xlim of the axis.
ax -- a matplotlib axes object
margin -- the fraction of the total height of the y-data to pad the upper and lower ylims"""
def get_bottom_top(line):
xd = line.get_xdata()
yd = line.get_ydata()
lo, hi = ax.get_xlim()
y_displayed = yd[((xd > lo) & (xd < hi))]
h = np.max(y_displayed) - np.min(y_displayed)
bot = np.min(y_displayed) - margin * h
top = np.max(y_displayed) + margin * h
return bot, top
lines = ax.get_lines()
bot, top = np.inf, -np.inf
for line in lines:
new_bot, new_top = get_bottom_top(line)
if new_bot < bot:
bot = new_bot
if new_top > top:
top = new_top
if bot == top:
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
ax.set_ylim(bot, top)
def set_lims(self, t_win, plt, ax):
if t_win is None:
# turn on to adjust the limits, then turn off to plot jumps
ax.autoscale(enable=True, axis='x', tight=False)
ax.autoscale(enable=False, axis='x', tight=False)
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
if t_win[0] == t_win[1]:
# t_win may be a tuple: build a new window instead of mutating in place
t_win = (t_win[0] - 1. / 365.25, t_win[1] + 1. / 365.25)
plt.xlim(t_win)
self.autoscale_y(ax)
def plot_missing_soln(self, ax):
# plot missing solutions
for missing in self.soln.ts_ns:
ax.plot((missing, missing), ax.get_ylim(), color=(1, 0, 1, 0.2), linewidth=1)
# plot the position of the outliers
for blunder in self.soln.ts_blu:
ax.quiver((blunder, blunder), ax.get_ylim(), (0, 0), (-0.01, 0.01), scale_units='height',
units='height', pivot='tip', width=0.008, edgecolors='r')
def plot_jumps(self, ax):
for jump in self.Jumps.table:
if jump.date < self.soln.date[0] or jump.date > self.soln.date[-1]:
continue
if not jump.fit:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:gray')
elif jump.p.jump_type == GENERIC_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'c:')
elif jump.p.jump_type == ANTENNA_CHANGE:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'b:')
elif jump.p.jump_type == REFERENCE_FRAME_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:green')
elif jump.p.jump_type == CO_SEISMIC_JUMP_DECAY:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'r:')
elif jump.p.jump_type == CO_SEISMIC_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:purple')
def todictionary(self, time_series=False, model=False):
# convert the ETM adjustment into a dictionary
# optionally, output the whole time series and evaluated model as well
L = self.l
# start with the parameters
etm = dict()
etm['Network'] = self.NetworkCode
etm['Station'] = self.StationCode
etm['lat'] = self.soln.lat[0]
etm['lon'] = self.soln.lon[0]
etm['ref_x'] = self.soln.auto_x[0]
etm['ref_y'] = self.soln.auto_y[0]
etm['ref_z'] = self.soln.auto_z[0]
etm['Jumps'] = [to_list(jump.p.toDict()) for jump in self.Jumps.table]
if self.A is not None:
etm['Polynomial'] = to_list(self.Linear.p.toDict())
etm['Periodic'] = to_list(self.Periodic.p.toDict())
etm['wrms'] = {'n': self.factor[0], 'e': self.factor[1], 'u': self.factor[2]}
etm['xyz_covariance'] = self.rotate_sig_cov(covar=self.covar).tolist()
etm['neu_covariance'] = self.covar.tolist()
if time_series:
ts = dict()
ts['t'] = np.array([self.soln.t.tolist(), self.soln.mjd.tolist()]).transpose().tolist()
ts['mjd'] = self.soln.mjd.tolist()
ts['x'] = self.soln.x.tolist()
ts['y'] = self.soln.y.tolist()
ts['z'] = self.soln.z.tolist()
ts['n'] = L[0].tolist()
ts['e'] = L[1].tolist()
ts['u'] = L[2].tolist()
ts['residuals'] = self.R.tolist()
ts['weights'] = self.P.transpose().tolist()
ts['model_neu'] = []
if model:
if self.A is not None:
for i in range(3):
ts['model_neu'].append((np.dot(self.As, self.C[i]).tolist()))
if self.A is not None:
ts['filter'] = np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]).tolist()
else:
ts['filter'] = []
etm['time_series'] = ts
return etm
def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FLOOR_V, force_model=False):
# this function find the requested epochs and returns an X Y Z and sigmas
# jmp = 'pre' returns the coordinate immediately before a jump
# jmp = 'post' returns the coordinate immediately after a jump
# jmp = None returns either the coordinate before or after, depending on the time of the jump.
# find this epoch in the t vector
date = pyDate.Date(year=year, doy=doy)
window = None
for jump in self.Jumps.table:
if jump.date == date and \
jump.p.jump_type in (GENERIC_JUMP, CO_SEISMIC_JUMP_DECAY, ANTENNA_CHANGE, CO_SEISMIC_JUMP) \
and jump.fit:
if np.sqrt(np.sum(np.square(jump.p.params[:, 0]))) > 0.02:
window = jump.date
# if no pre or post specified, then determine using the time of the jump
if jmp is None:
if (jump.date.datetime().hour + jump.date.datetime().minute / 60.0) < 12:
jmp = 'post'
else:
jmp = 'pre'
# use the previous or next date to get the APR
# if jmp == 'pre':
# date -= 1
# else:
# date += 1
index = np.where(self.soln.mjd == date.mjd)
index = index[0]
neu = np.zeros((3, 1))
L = self.L
ref_pos = np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
if index.size and self.A is not None:
# found a valid epoch in the t vector
# now see if this epoch was filtered
if np.all(self.F[:, index]) and force_model is False:
# the coordinate is good
xyz = L[:, index]
sig = self.R[:, index]
source = self.soln.stack_name.upper() + ' with ETM solution: good'
else:
# the coordinate is marked as bad
# get the requested epoch from the ETM
idt = np.argmin(np.abs(self.soln.ts - date.fyear))
for i in range(3):
neu[i] = np.dot(self.As[idt, :], self.C[i])
xyz = self.rotate_2xyz(neu) + ref_pos
# Use the deviation from the ETM multiplied by 2.5 to estimate the error
sig = 2.5 * self.R[:, index]
source = self.soln.stack_name.upper() + ' with ETM solution: filtered'
elif not index.size and self.A is not None:
# the coordinate doesn't exist, get it from the ETM
idt = np.argmin(np.abs(self.soln.ts - date.fyear))
source = 'No ' + self.soln.stack_name.upper() + ' solution: ETM'
for i in range(3):
neu[i] = np.dot(self.As[idt, :], self.C[i])
xyz = self.rotate_2xyz(neu) + ref_pos
# since there is no way to estimate the error,
# use the nominal sigma multiplied by 2.5
sig = 2.5 * self.factor[:, np.newaxis]
elif index.size and self.A is None:
# no ETM (too few points), but we have a solution for the requested day
xyz = L[:, index]
# set the uncertainties in NEU by hand
sig = np.array([[9.99], [9.99], [9.99]])
source = self.soln.stack_name.upper() + ' solution, no ETM'
else:
# no ETM (too few points) and no solution for this day, get average
source = 'No ' + self.soln.stack_name.upper() + ' solution, no ETM: mean coordinate'
xyz = np.mean(L, axis=1)[:, np.newaxis]
# set the uncertainties in NEU by hand
sig = np.array([[9.99], [9.99], [9.99]])
if self.A is not None:
# get the velocity of the site
if np.sqrt(np.square(self.Linear.p.params[0, 1]) +
np.square(self.Linear.p.params[1, 1]) +
np.square(self.Linear.p.params[2, 1])) > 0.2:
# fast moving station! bump up the sigma floor
sigma_h = 99.9
sigma_v = 99.9
source += '. fast moving station, bumping up sigmas'
# apply floor sigmas
sig = np.sqrt(np.square(sig) + np.square(np.array([[sigma_h], [sigma_h], [sigma_v]])))
return xyz, sig, window, source
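# Hedged usage sketch (values and names are illustrative, not part of the API):
#   xyz, sig, window, source = etm_obj.get_xyz_s(2019, 123)
#   xyz    : 3x1 ECEF coordinate for 2019/123
#   sig    : NEU sigmas with the floor values applied
#   window : pyDate.Date of a significant (> 2 cm) jump on that day, else None
#   source : human-readable description of how the coordinate was derived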
def rotate_2neu(self, ecef):
return np.array(ct2lg(ecef[0], ecef[1], ecef[2], self.soln.lat, self.soln.lon))
def rotate_2xyz(self, neu):
return np.array(lg2ct(neu[0], neu[1], neu[2], self.soln.lat, self.soln.lon))
def rotate_sig_cov(self, sigmas=None, covar=None):
if sigmas is None and covar is None:
raise pyETMException('Error in rotate_sig_cov: must provide either sigmas or covariance matrix')
R = rotlg2ct(self.soln.lat, self.soln.lon)
if sigmas is not None:
# build a covariance matrix based on sigmas
sd = np.diagflat(np.square(sigmas))
sd[0, 1] = self.covar[0, 1]
sd[1, 0] = self.covar[1, 0]
sd[2, 1] = self.covar[2, 1]
sd[1, 2] = self.covar[1, 2]
sd[0, 2] = self.covar[0, 2]
sd[2, 0] = self.covar[2, 0]
# check that resulting matrix is PSD:
if not self.isPD(sd):
sd = self.nearestPD(sd)
sneu = np.dot(np.dot(R[:, :, 0], sd), R[:, :, 0].transpose())
dneu = np.sqrt(np.diag(sneu))
else:
# full covariance matrix given: rotate it directly
dneu = np.dot(np.dot(R[:, :, 0], covar), R[:, :, 0].transpose())
return dneu
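# Note the asymmetric return: with `sigmas` the method yields the NEU standard
# deviations (sqrt of the rotated diagonal), while with `covar` it yields the
# full rotated 3x3 covariance matrix.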
def nearestPD(self, A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if self.isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrices with exactly zero eigenvalues, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is MATLAB
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrices of small dimension, be on
# the order of 1e-16. In practice, both ways converge.
I = np.eye(A.shape[0])
k = 1
while not self.isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k ** 2 + spacing)
k += 1
return A3
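# Hedged example of the repair (matrix values are illustrative only):
#   M = np.array([[1., 2.], [2., 1.]])  # eigenvalues 3 and -1 -> not PD
#   M_pd = self.nearestPD(M)            # self.isPD(M_pd) is True afterwards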
@staticmethod
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(B)
return True
except np.linalg.LinAlgError:
return False
def load_parameters(self, params, l):
# default unit variance factor per NEU component; replaced by the
# var_factor vector from the db when present (factor[i] is used below)
factor = np.ones(3)
index = []
residuals = []
p = []
for param in params:
par = np.array(param['params'])
sig = np.array(param['sigmas'])
if param['object'] == 'polynomial':
self.Linear.load_parameters(par, sig, param['t_ref'])
if param['object'] == 'periodic':
self.Periodic.load_parameters(params=par, sigmas=sig)
if param['object'] == 'jump':
for jump in self.Jumps.table:
if jump.p.hash == param['hash']:
jump.load_parameters(params=par, sigmas=sig)
if param['object'] == 'var_factor':
# already a vector in the db
factor = par
x = self.Linear.p.params
s = self.Linear.p.sigmas
for jump in self.Jumps.table:
if jump.fit:
x = np.append(x, jump.p.params, axis=1)
s = np.append(s, jump.p.sigmas, axis=1)
x = np.append(x, self.Periodic.p.params, axis=1)
s = np.append(s, self.Periodic.p.sigmas, axis=1)
for i in range(3):
residuals.append(l[i] - np.dot(self.A(constrains=False), x[i, :]))
ss = np.abs(np.divide(residuals[i], factor[i]))
index.append(ss <= LIMIT)
f = np.ones((l.shape[1],))
sw = np.power(10, LIMIT - ss[ss > LIMIT])
sw[sw < np.finfo(np.float64).eps] = np.finfo(np.float64).eps
f[ss > LIMIT] = sw
p.append(np.square(np.divide(f, factor[i])))
self.C = x
self.S = s
self.F = np.array(index)
self.R = np.array(residuals)
self.factor = factor
self.P = np.array(p)
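# The weights rebuilt above mirror adjust_lsq's downweighting. A worked
# example, assuming LIMIT = 2.5 (its value is defined elsewhere in this module):
#   ss = 3.5  ->  f = 10 ** (2.5 - 3.5) = 0.1, so p = (0.1 / factor[i]) ** 2,
#   i.e. an observation one unit past the limit gets 1/100 of the weight.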
def adjust_lsq(self, Ai, Li):
A = Ai(constrains=True)
L = Ai.get_l(Li, constrains=True)
cst_pass = False
iteration = 0
factor = 1
So = 1
dof = (Ai.shape[0] - Ai.shape[1])
X1 = chi2.ppf(1 - 0.05 / 2, dof)
X2 = chi2.ppf(0.05 / 2, dof)
s = np.array([])
v = np.array([])
C = np.array([])
P = Ai.get_p(constrains=True)
while not cst_pass and iteration <= 10:
W = np.sqrt(P)
Aw = np.multiply(W[:, None], A)
Lw = np.multiply(W, L)
C = np.linalg.lstsq(Aw, Lw, rcond=-1)[0]
v = L - np.dot(A, C)
# unit variance
So = np.sqrt(np.dot(v, np.multiply(P, v)) / dof)
x = np.power(So, 2) * dof
# obtain the overall uncertainty predicted by lsq
factor = factor * So
# calculate the normalized sigmas
s = np.abs(np.divide(v, factor))
if x < X2 or x > X1:
# if it falls in here it's because it didn't pass the Chi2 test
cst_pass = False
# reweight using Mike's method of equal weight until 2 sigma
f = np.ones((v.shape[0], ))
# f[s > LIMIT] = 1. / (np.power(10, LIMIT - s[s > LIMIT]))
# do not allow sigmas > 100 m, which is basically not putting
# the observation in. Otherwise, due to a model problem
# (missing jump, etc) you end up with very unstable inversions
# f[f > 500] = 500
sw = np.power(10, LIMIT - s[s > LIMIT])
sw[sw < np.finfo(np.float64).eps] = np.finfo(np.float64).eps
f[s > LIMIT] = sw
P = np.square(np.divide(f, factor))
else:
cst_pass = True
iteration += 1
# make sure there are no values below eps, otherwise the matrix becomes singular
P[P < np.finfo(np.float64).eps] = 1e-6
# some statistics
SS = np.linalg.inv(np.dot(A.transpose(), np.multiply(P[:, None], A)))
sigma = So * np.sqrt(np.diag(SS))
# flag observations whose normalized residual is <= LIMIT (i.e. inliers)
index = Ai.remove_constrains(s <= LIMIT)
v = Ai.remove_constrains(v)
return C, sigma, index, v, factor, P
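# Hedged paraphrase of the scheme implemented above (not additional API):
#   1. solve the weighted LSQ              C = lstsq(W*A, W*L), W = sqrt(P)
#   2. estimate the unit variance          So = sqrt(v' P v / dof)
#   3. chi2-test So**2 * dof against [X2, X1]; on failure, downweight
#      observations with normalized residual s > LIMIT by f = 10**(LIMIT - s)
#   4. iterate (up to 10 times), accumulating the overall variance factor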
@staticmethod
def chi2inv(chi, df):
"""Return prob(chisq >= chi, with df degrees of
freedom).
df must be even.
"""
assert df & 1 == 0
# XXX If chi is very large, exp(-m) will underflow to 0.
m = chi / 2.0
sum = term = np.exp(-m)
for i in range(1, df // 2):
term *= m / i
sum += term
# With a small chi and large df, accumulated roundoff error, plus error in
# the platform exp(), can cause this to spill a few ULP above 1.0. For
# example, chi2P(100, 300) on my box has sum == 1.0 + 2.0**-52 at this
# point. Returning a value even a teensy bit over 1.0 is no good.
return min(sum, 1.0)
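# For even df this series is the chi-square survival function; a hedged
# cross-check using scipy (chi2 is already imported for ppf above):
#   ETM.chi2inv(100.0, 300) ~ chi2.sf(100.0, 300)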
@staticmethod
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
def get_outliers_list(self):
"""
Function to obtain the outliers based on the ETMs sigma
:return: a list containing the network code, station code and dates of the outliers in the time series
"""
filt = self.F[0] * self.F[1] * self.F[2]
dates = [pyDate.Date(mjd=mjd) for mjd in self.soln.mjd[~filt]]
return [(net, stn, date) for net, stn, date in zip(repeat(self.NetworkCode), repeat(self.StationCode), dates)]
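# e.g. (illustrative): [('igs', 'algo', pyDate.Date(mjd=...)), ...] for every
# epoch where at least one NEU component failed the sigma filter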
class PPPETM(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False, no_model=False, interseismic=None):
# load all the PPP coordinates available for this station
# exclude ppp solutions in the exclude table and any solution that is more than 100 meters from the auto coord
self.ppp_soln = PppSoln(cnn, NetworkCode, StationCode)
ETM.__init__(self, cnn, self.ppp_soln, no_model)
# no offset applied
self.L = np.array([self.soln.x,
self.soln.y,
self.soln.z])
# solution reduced to the station's reference coordinate and rotated to NEU
self.l = self.rotate_2neu(np.array([self.ppp_soln.x - self.ppp_soln.auto_x,
self.ppp_soln.y - self.ppp_soln.auto_y,
self.ppp_soln.z - self.ppp_soln.auto_z]))
self.run_adjustment(cnn, self.l, plotit, self.ppp_soln)
# save the parameters to the db
# always save for PPP
self.save_parameters(cnn)
class GamitETM(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
no_model=False, gamit_soln=None, stack_name=None, interseismic=None):
if gamit_soln is None:
self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM stacks '
'WHERE "name" = \'%s\' AND "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
% (stack_name, NetworkCode, StationCode))
self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, stack_name)
else:
# load the GAMIT polyhedrons
self.gamit_soln = gamit_soln
ETM.__init__(self, cnn, self.gamit_soln, no_model, interseismic=interseismic)
# no offset applied
self.L = np.array([self.gamit_soln.x,
self.gamit_soln.y,
self.gamit_soln.z])
# solution reduced to the station's reference coordinate and rotated to NEU
self.l = self.rotate_2neu(np.array([self.gamit_soln.x - self.gamit_soln.auto_x,
self.gamit_soln.y - self.gamit_soln.auto_y,
self.gamit_soln.z - self.gamit_soln.auto_z]))
if interseismic:
self.l -= self.Linear.interseismic
self.run_adjustment(cnn, self.l, plotit, self.gamit_soln)
# save parameters to db
# the object will also save parameters if the list object is invoked
self.save_parameters(cnn)
def get_etm_soln_list(self, use_ppp_model=False, cnn=None):
# this function returns the values of the ETM ONLY
dict_o = []
if self.A is not None:
neu = []
if not use_ppp_model:
# get residuals from GAMIT solutions to GAMIT model
for i in range(3):
neu.append(np.dot(self.A, self.C[i]))
else:
# get residuals from GAMIT solutions to PPP model
etm = PPPETM(cnn, self.NetworkCode, self.StationCode)
# DDG: 20-SEP-2018 compare using MJD not FYEAR to avoid round-off errors
index = np.isin(etm.soln.mjds, self.soln.mjd)
for i in range(3):
# use the etm object to obtain the design matrix that matches the dimensions of self.soln.t
neu.append(np.dot(etm.As[index, :], etm.C[i]))
del etm
rxyz = self.rotate_2xyz(np.array(neu)) + np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
dict_o += [(net_stn, x, y, z, year, doy, fyear)
for x, y, z, net_stn, year, doy, fyear in
zip(rxyz[0].tolist(), rxyz[1].tolist(), rxyz[2].tolist(),
repeat(self.NetworkCode + '.' + self.StationCode),
[date.year for date in self.gamit_soln.date],
[date.doy for date in self.gamit_soln.date],
[date.fyear for date in self.gamit_soln.date])]
else:
raise pyETMException_NoDesignMatrix('No design matrix available for %s.%s' %
(self.NetworkCode, self.StationCode))
return dict_o
class DailyRep(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
no_model=False, gamit_soln=None, project=None):
if gamit_soln is None:
self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM gamit_soln '
'WHERE "Project" = \'%s\' AND "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
% (project, NetworkCode, StationCode))
self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, project)
else:
# load the GAMIT polyhedrons
self.gamit_soln = gamit_soln
ETM.__init__(self, cnn, self.gamit_soln, no_model, False, False, False)
# set the solution type to 'dra'
self.soln.type = 'dra'
# for daily repetitivities, use the raw coordinates rotated to NEU
self.l = self.rotate_2neu(np.array([self.gamit_soln.x,
self.gamit_soln.y,
self.gamit_soln.z]))
# for repetitivities, the same vector is used for both L and l
self.L = self.l
self.run_adjustment(cnn, self.l, plotit, self.gamit_soln)
# only save the excluded solutions in this module (DailyRep)
self.save_excluded_soln(cnn)
def get_residuals_dict(self):
# this function returns the residuals with respect to the ETM model ONLY
dict_o = []
if self.A is not None:
neu = []
for i in range(3):
neu.append(np.dot(self.A, self.C[i]))
xyz = self.rotate_2xyz(np.array(neu)) + np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
rxyz = xyz - self.L
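# report unit sigmas: uncertainties are not propagated for daily repetitivities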
px = np.ones(self.P[0].shape)
py = np.ones(self.P[1].shape)
pz = np.ones(self.P[2].shape)
dict_o += [(net, stn, x, y, z, sigx, sigy, sigz, year, doy)
for x, y, z, sigx, sigy, sigz, net, stn, year, doy in
zip(rxyz[0].tolist(), rxyz[1].tolist(), rxyz[2].tolist(),
px.tolist(), py.tolist(), pz.tolist(),
repeat(self.NetworkCode), repeat(self.StationCode),
[date.year for date in self.gamit_soln.date],
[date.doy for date in self.gamit_soln.date])]
else:
raise pyETMException_NoDesignMatrix('No design matrix available for %s.%s' %
(self.NetworkCode, self.StationCode))
return dict_o
class FileETM(ETM):
def __init__(self, cnn, poly_list=None, plotit=False, no_model=False):
ETM.__init__(self, cnn, poly_list, no_model)
self.soln.type = 'file'
# no offset applied
self.L = np.array([self.soln.x,
self.soln.y,
self.soln.z])
# solution reduced to the station's reference coordinate and rotated to NEU
self.l = self.rotate_2neu(np.array([self.soln.x - self.soln.auto_x,
self.soln.y - self.soln.auto_y,
self.soln.z - self.soln.auto_z]))
self.run_adjustment(cnn, self.l, plotit, poly_list)