repo_name | ref | path | copies | content |
|---|---|---|---|---|
llvm-mirror/llvm | refs/heads/master | utils/lit/lit/display.py | 5 | import sys
import lit.ProgressBar
def create_display(opts, tests, total_tests, workers):
if opts.quiet:
return NopProgressDisplay()
of_total = (' of %d' % total_tests) if (tests != total_tests) else ''
header = '-- Testing: %d%s tests, %d workers --' % (tests, of_total, workers)
progress_bar = None
if opts.succinct and opts.useProgressBar:
try:
tc = lit.ProgressBar.TerminalController()
progress_bar = lit.ProgressBar.ProgressBar(tc, header)
except ValueError:
print(header)
progress_bar = lit.ProgressBar.SimpleProgressBar('Testing: ')
else:
print(header)
if progress_bar:
progress_bar.update(0, '')
return ProgressDisplay(opts, tests, progress_bar)
class NopProgressDisplay(object):
def update(self, test): pass
def finish(self): pass
class ProgressDisplay(object):
def __init__(self, opts, numTests, progressBar):
self.opts = opts
self.numTests = numTests
self.progressBar = progressBar
self.completed = 0
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.succinct:
sys.stdout.write('\n')
def update(self, test):
self.completed += 1
show_result = test.result.code.isFailure or \
self.opts.showAllOutput or \
(not self.opts.quiet and not self.opts.succinct)
if show_result:
self.print_result(test)
if self.progressBar:
percent = float(self.completed) / self.numTests
self.progressBar.update(percent, test.getFullName())
def print_result(self, test):
if self.progressBar:
self.progressBar.clear()
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
# Show the test failure output, if requested.
if (test.result.code.isFailure and self.opts.showOutput) or \
self.opts.showAllOutput:
if test.result.code.isFailure:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Report micro-tests, if present
if test.result.microResults:
items = sorted(test.result.microResults.items())
for micro_test_name, micro_test in items:
print("%s MICRO-TEST: %s" %
('*'*3, micro_test_name))
if micro_test.metrics:
sorted_metrics = sorted(micro_test.metrics.items())
for metric_name, value in sorted_metrics:
print(' %s: %s ' % (metric_name, value.format()))
# Ensure the output is flushed.
sys.stdout.flush()
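# --- Hedged usage sketch (illustrative, not part of lit) ---
# How a driver might wire this up, assuming an opts object carrying the
# attributes read above (quiet, succinct, useProgressBar, showOutput,
# showAllOutput) and lit-style test objects:
#
#   display = create_display(opts, len(tests), total_tests, workers)
#   for test in finished_tests:
#       display.update(test)   # prints the result line and advances the bar
#   display.finish()           # clears the bar / terminates succinct output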
|
isenseDev/Teaching | refs/heads/master | ExampleCode/Python/OldReferenceCode/GET/Get_ProjFields.py | 2 | """
This script tests getting a project's fields.
"""
import requests, json, sys
# This function will loop until the user tells it to quit.
def loop_user():
print "***************************"
print "* GET FIELDS Script *"
print "***************************"
# Get a project ID from the user. Format the URL for a GET request.
project_ID = raw_input("Enter a project ID: ")
# GET Request: /api/v1/projects/XX (XX = PROJECT ID)
URL = "http://rsense-dev.cs.uml.edu/api/v1/projects/%s" % project_ID
r = requests.get(URL)
status = r.status_code # Save this to compare later.
# r.json() decodes the response body into a Python dict.
# This GET request returns either a 200 or a 404 Not Found.
# So we should check to see if the project ID that the user gave us is valid.
if status == 200:
# Print out the project information.
print "\nPROJECT INFORMATION"
print " Project ID: %s" % r.json()['id']
print "Project Name: %s" % r.json()['name']
print " Project URL: %s" % r.json()['url']
# Let's make sure there are fields for us to check. fieldCount is 0 if there aren't any fields to show.
empty = r.json()['fieldCount']
# If there aren't any fields, just go back to the beginning.
if empty == 0:
print "\nI didn't find any fields for this project! Try a different Project that has fields."
loop_again() # See if they want to search for more projects.
# Even more info:
print "\nName & ID of each field found in this project: "
arg = r.json()['fields']
for i in arg:
print "Field Name: %s" % i['name']
print "Field ID: %s" % i['id']
loop_again() # See if they want to search for more projects.
# 404 means not found.
if status == 404:
print "\nCouldn't find that project! You had entered %s for a project ID." % project_ID
print "Try a project ID that exists."
loop_again()
def loop_again():
# Ask the user if they want to continue.
try_again = raw_input("\nWould you like to search again? (Y/N) -> ")
if try_again == 'Y' or try_again == 'y':
print "\n"
loop_user()
else:
exit_script()
# Runs after the looping is over; prints a farewell and exits.
def exit_script():
print "\nExiting..."
sys.exit()
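# Illustrative shape of a 200 response body, inferred from the keys this
# script reads (the real API may return more):
# {
#   "id": 42, "name": "Example Project",
#   "url": "http://rsense-dev.cs.uml.edu/projects/42",
#   "fieldCount": 2,
#   "fields": [{"id": 1, "name": "Temperature"}, {"id": 2, "name": "Time"}]
# }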
loop_user() |
rayjim/hadoop_design_pattern | refs/heads/master | contrib/hod/hodlib/__init__.py | 364 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
|
idem2lyon/persomov | refs/heads/master | libs/pyutil/scripts/passphrase.py | 92 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, math, random
from pyutil.mathutil import div_ceil
from pkg_resources import resource_stream
def recursive_subset_sum(entropy_needed, wordlists):
# Pick a minimalish set of numbers which sum to at least
# entropy_needed.
# Okay now what's the smallest number of words which will give us
# at least this much entropy?
entropy_of_biggest_wordlist = wordlists[-1][0]
assert isinstance(entropy_of_biggest_wordlist, float), wordlists[-1]
needed_words = div_ceil(entropy_needed, entropy_of_biggest_wordlist)
# How much entropy do we need from each word?
needed_entropy_per_word = entropy_needed / needed_words
# What's the smallest wordlist that offers at least this much
# entropy per word?
for (wlentropy, wl) in wordlists:
if wlentropy >= needed_entropy_per_word:
break
assert wlentropy >= needed_entropy_per_word, (wlentropy, needed_entropy_per_word)
result = [(wlentropy, wl)]
# If we need more, recurse...
if wlentropy < entropy_needed:
rest = recursive_subset_sum(entropy_needed - wlentropy, wordlists)
result.extend(rest)
return result
def gen_passphrase(entropy, allwords):
maxlenwords = []
i = 2 # The smallest set is words of length 1 or 2.
words = [x for x in allwords if len(x) <= i]
maxlenwords.append((math.log(len(words), 2), words))
while len(maxlenwords[-1][1]) < len(allwords):
i += 1
words = [x for x in allwords if len(x) <= i]
maxlenwords.append((math.log(len(words), 2), words))
sr = random.SystemRandom()
passphrase = []
wordlists_to_use = recursive_subset_sum(entropy, maxlenwords)
passphraseentropy = 0.0
for (wle, wl) in wordlists_to_use:
passphrase.append(sr.choice(wl))
passphraseentropy += wle
return (u".".join(passphrase), passphraseentropy)
def main():
parser = argparse.ArgumentParser(prog="chbs", description="Create a random passphrase by picking a few random words.")
parser.add_argument('-d', '--dictionary', help="what file to read a list of words from (or omit this option to use chbs's bundled dictionary)", type=argparse.FileType('rU'), metavar="DICT")
parser.add_argument('bits', help="how many bits of entropy minimum", type=float, metavar="BITS")
args = parser.parse_args()
dicti = args.dictionary
if not dicti:
dicti = resource_stream('pyutil', 'data/wordlist.txt')
allwords = set([x.decode('utf-8').strip().lower() for x in dicti.readlines()])
passphrase, bits = gen_passphrase(args.bits, allwords)
print u"Your new password is: '%s'. It is worth about %s bits." % (passphrase, bits)
|
ncleaton/indumpco | refs/heads/master | tests/tutil.py | 1 | import tempfile, shutil, os, sys
from nose.tools import assert_equals
import indumpco
class IndumpcoUnderTest(object):
def __init__(self, input_data, reuse_dumpdirs=[], remote_segs=None):
self.basedir = tempfile.mkdtemp()
self.tmpdir = os.path.join(self.basedir, 'tmp')
os.mkdir(self.tmpdir)
self.reuse_dumpdirs = reuse_dumpdirs
self.remote_segs = remote_segs
self.dumpdir = os.path.join(self.basedir, 'd')
self.blockdir = os.path.join(self.dumpdir, 'blocks')
remseg_file = None
if remote_segs is not None:
remseg_file = os.path.join(self.tmpdir, 'remotesegs')
rs = open(remseg_file, 'w')
for remseg in remote_segs:
rs.write(remseg + "\n")
rs.close()
tmp = os.path.join(self.tmpdir, 'input')
f = open(tmp, 'w')
f.write(input_data)
f.close()
f = open(tmp)
indumpco.create_dump(src_fh=f, outdir=self.dumpdir, dumpdirs_for_reuse=reuse_dumpdirs, remote_seg_list_file=remseg_file)
os.unlink(tmp)
if remseg_file is not None:
os.unlink(remseg_file)
# Gather stats on segment reuse. This works by counting hardlinks on
# segment files, so unless we do it now the deletion of the dump(s)
# from which we've reused segments might confuse the count.
self._compute_segments_new_reused_absent()
def _compute_segments_new_reused_absent(self):
idxpath = os.path.join(self.dumpdir, 'index')
self.new_segs, self.reused_segs, self.absent_segs = 0, 0, 0
self.set_of_digests = set()
for line in open(idxpath):
segsum = line.strip().split()[1]
self.set_of_digests.add(segsum)
path = os.path.join(self.blockdir, segsum)
if os.path.exists(path):
nlink = os.lstat(path).st_nlink
if nlink == 1:
# a segment unique to this dump
self.new_segs += 1
else:
# a reused segment hardlinked from another dump
self.reused_segs += 1
else:
self.absent_segs += 1
def delete_data(self):
if os.path.exists(self.basedir) and 'INDUMPCO_TEST_NODEL' not in os.environ:
shutil.rmtree(self.basedir)
def __del__(self):
self.delete_data()
def restore_to_string(self, extra_blkdirs=[]):
if not os.path.exists(self.basedir):
raise RuntimeError('restore attempt after data deleted', (self, self.basedir))
return ''.join((seg for seg in indumpco.extract_dump(self.dumpdir, extra_blkdirs)))
def check_indumpco_restores_input(input_str, mangler_callback=None):
idc = IndumpcoUnderTest(input_str)
if mangler_callback is not None:
mangler_callback(idc)
assert_equals(idc.restore_to_string(), input_str)
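# --- Hedged usage sketch (illustrative, not one of the real test cases) ---
# Dump a string, then dump a superset while reusing the first dump's
# directory; how many segments get reused depends on indumpco's chunking,
# so only the round trip is asserted here.
def _example_reuse():
    first = IndumpcoUnderTest('x' * 1000000)
    second = IndumpcoUnderTest('x' * 1000000 + 'y',
                               reuse_dumpdirs=[first.dumpdir])
    assert_equals(second.restore_to_string(), 'x' * 1000000 + 'y')
    # second.new_segs and second.reused_segs now describe the segment reuse.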
|
cheerss/Fast-Style-Combination | refs/heads/master | vgg.py | 3 | # Copyright (c) 2015-2017 Anish Athalye. Released under GPLv3.
import tensorflow as tf
import numpy as np
import scipy.io
VGG19_LAYERS = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
def load_net(data_path):
data = scipy.io.loadmat(data_path)
mean = data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = data['layers'][0]
return weights, mean_pixel
def net_preloaded(weights, input_image, pooling):
net = {}
current = input_image
for i, name in enumerate(VGG19_LAYERS):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = np.transpose(kernels, (1, 0, 2, 3))
bias = bias.reshape(-1)
current = _conv_layer(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = _pool_layer(current, pooling)
net[name] = current
assert len(net) == len(VGG19_LAYERS)
return net
def _conv_layer(input, weights, bias):
conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
padding='SAME')
return tf.nn.bias_add(conv, bias)
def _pool_layer(input, pooling):
if pooling == 'avg':
return tf.nn.avg_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
padding='SAME')
else:
return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
padding='SAME')
def preprocess(image, mean_pixel):
return image - mean_pixel
def unprocess(image, mean_pixel):
return image + mean_pixel
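# --- Hedged usage sketch (illustrative, not part of this file) ---
# Assumes the usual 'imagenet-vgg-verydeep-19.mat' weights file and a
# float32 image batch of shape (1, H, W, 3); pooling is 'max' or 'avg'.
#
#   weights, mean_pixel = load_net('imagenet-vgg-verydeep-19.mat')
#   image = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
#   net = net_preloaded(weights, preprocess(image, mean_pixel), 'max')
#   features = net['relu4_2']  # e.g. a content layer for style transfer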
|
UDOOboard/Arduino | refs/heads/udoo-1.5.8 | app/src/processing/app/i18n/python/requests/packages/charade/langthaimodel.py | 206 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
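# --- Hedged usage sketch: this mirrors how charade's sbcsgroupprober module
# consumes these model dicts; treat the exact import path as an assumption.
#
#   from charade.sbcharsetprober import SingleByteCharSetProber
#   prober = SingleByteCharSetProber(TIS620ThaiModel)
#   prober.feed(some_tis620_bytes)
#   confidence = prober.get_confidence()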
|
blairconrad/LibraryHippo | refs/heads/master | App/utils/soup.py | 2 | #!/usr/bin/env python
from BeautifulSoup import Comment
def remove_comments(element):
comments = element.findAll(text=lambda text: isinstance(text, Comment))
for comment in comments:
    comment.extract()
def text(element):
if isinstance(element, unicode):
return element.strip()
return [e.strip() for e in element.recursiveChildGenerator() if isinstance(e, unicode) and e.strip()]
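# --- Hedged usage sketch (illustrative; BeautifulSoup 3 API) ---
if __name__ == '__main__':
    from BeautifulSoup import BeautifulSoup
    soup = BeautifulSoup('<p>keep <!-- drop this comment --> me</p>')
    remove_comments(soup)
    print text(soup.p)  # the comment node is gone; only real text remains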
|
bufferapp/buffer-django-nonrel | refs/heads/master | django/contrib/flatpages/templatetags/flatpages.py | 246 | from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
flatpages = FlatPage.objects.filter(sites__id=settings.SITE_ID)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
def get_flatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
# Must have 3 to 6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
register.tag('get_flatpages', get_flatpages)
|
prakritish/ansible | refs/heads/devel | lib/ansible/utils/__init__.py | 2520 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
texastribune/newsapps-django-template | refs/heads/master | project_name/project_name/settings/local.py | 1 | ##################
# LOCAL SETTINGS #
##################
import os
import dj_database_url
from .base import *
#######################
# DEBUG CONFIGURATION #
#######################
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
##########################
# DATABASE CONFIGURATION #
##########################
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
# https://github.com/kennethreitz/dj-database-url
DATABASES = {
'default': dj_database_url.config(
default='sqlite:///{}'.format(os.path.join(BASE_DIR, 'default.db'))
)
}
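# A hedged example (not part of this template): dj_database_url.config()
# reads the DATABASE_URL environment variable, so the sqlite default above
# can be overridden without editing this file, e.g.
#   DATABASE_URL=postgres://user:password@localhost:5432/mydb ./manage.py runserver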
#######################
# CACHE CONFIGURATION #
#######################
# See: https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
######################################
# DJANGO DEBUG TOOLBAR CONFIGURATION #
######################################
INSTALLED_APPS += (
'debug_toolbar',
)
|
leansoft/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_credit_eligibility.py | 121 | """
Unit tests for credit eligibility UI in Studio.
"""
import mock
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.modulestore.tests.factories import CourseFactory
from openedx.core.djangoapps.credit.api import get_credit_requirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.signals import on_course_publish
class CreditEligibilityTest(CourseTestCase):
"""Base class to test the course settings details view in Studio for credit
eligibility requirements.
"""
def setUp(self):
super(CreditEligibilityTest, self).setUp()
self.course = CourseFactory.create(org='edX', number='dummy', display_name='Credit Course')
self.course_details_url = reverse_course_url('settings_handler', unicode(self.course.id))
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_CREDIT_ELIGIBILITY': False})
def test_course_details_with_disabled_setting(self):
"""Test that user don't see credit eligibility requirements in response
if the feature flag 'ENABLE_CREDIT_ELIGIBILITY' is not enabled.
"""
response = self.client.get_html(self.course_details_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Course Credit Requirements")
self.assertNotContains(response, "Steps required to earn course credit")
@mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_CREDIT_ELIGIBILITY': True})
def test_course_details_with_enabled_setting(self):
"""Test that credit eligibility requirements are present in
the response if the feature flag 'ENABLE_CREDIT_ELIGIBILITY' is enabled.
"""
# verify that the credit eligibility requirements block doesn't show if the
# course is not set as a credit course
response = self.client.get_html(self.course_details_url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Course Credit Requirements")
self.assertNotContains(response, "Steps required to earn course credit")
# verify that the credit eligibility requirements block shows if the
# course is set as a credit course and has eligibility requirements
credit_course = CreditCourse(course_key=unicode(self.course.id), enabled=True)
credit_course.save()
self.assertEqual(len(get_credit_requirements(self.course.id)), 0)
# test that after publishing the course, the minimum grade requirement is added
on_course_publish(self.course.id)
self.assertEqual(len(get_credit_requirements(self.course.id)), 1)
response = self.client.get_html(self.course_details_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Course Credit Requirements")
self.assertContains(response, "Steps required to earn course credit")
|
eahneahn/free | refs/heads/master | lib/python2.7/site-packages/pygments/lexers/_mapping.py | 68 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Every time
you change something in a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk',), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat',), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control',), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas',), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.web', 'haXe', ('hx', 'haXe'), ('*.hx',), ('text/haxe',)),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode',), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript',), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1'), ('*.ps1',), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties',), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf',), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
if __name__ == '__main__':
import sys
import os
# lookup lexers
found_lexers = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers.%s' % filename[:-3]
print module_name
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
found_lexers.append(
'%r: %r' % (lexer_name,
(module_name,
lexer.name,
tuple(lexer.aliases),
tuple(lexer.filenames),
tuple(lexer.mimetypes))))
    # sort them; that should make the diff files for svn smaller
found_lexers.sort()
# extract useful sourcecode from this file
f = open(__file__)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('LEXERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
f = open(__file__, 'wb')
f.write(header)
f.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
f.write(footer)
f.close()
|
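The tuples above give each lexer's (module path, display name, aliases, filename patterns, mimetypes), and the __main__ block regenerates them by importing every module under pygments/lexers. A minimal sketch of how such a mapping supports lazy loading; find_lexer is a hypothetical helper written for illustration (Pygments' real entry point is pygments.lexers.get_lexer_by_name):

    def find_lexer(lexers, alias):
        # Scan the mapping for an alias and import the owning module only
        # when the lexer is actually requested.
        for class_name, info in lexers.items():
            module_name, name, aliases, patterns, mimetypes = info
            if alias in aliases:
                module = __import__(module_name, None, None, [class_name])
                return getattr(module, class_name)()
        raise ValueError('no lexer registered for alias %r' % (alias,))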
hujiajie/chromium-crosswalk | refs/heads/master | net/data/verify_certificate_chain_unittest/generate-intermediary-lacks-signing-key-usage.py | 16 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary and a trusted root. The intermediary
contains a keyUsage extension, HOWEVER it does not contain the keyCertSign bit.
Hence validation is expected to fail."""
import common
# Self-signed root certificate (part of trust store).
root = common.create_self_signed_root_certificate('Root')
# Intermediary that is missing keyCertSign.
intermediary = common.create_intermediary_certificate('Intermediary', root)
intermediary.get_extensions().set_property('keyUsage',
'critical,digitalSignature,keyEncipherment')
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediary)
chain = [target, intermediary]
trusted = [root]
time = common.DEFAULT_TIME
verify_result = False
common.write_test_file(__doc__, chain, trusted, time, verify_result)
|
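The generated intermediary above asserts keyUsage with only digitalSignature and keyEncipherment, so a verifier must reject it as an issuer: RFC 5280 requires the keyCertSign bit in any certificate that signs other certificates. A minimal sketch of that single check using the third-party cryptography package (an assumption here; the verifier this script feeds is Chromium's C++ code, not Python):

    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.x509.oid import ExtensionOID

    def may_sign_certificates(pem_bytes):
        # False when keyUsage is present but the keyCertSign bit is clear.
        cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
        try:
            usage = cert.extensions.get_extension_for_oid(
                ExtensionOID.KEY_USAGE).value
        except x509.ExtensionNotFound:
            return True  # no keyUsage extension: usage is unrestricted
        return usage.key_cert_sign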
silenceli/nova | refs/heads/master | nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py | 2 | # Copyright (c) 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mox3 import mox
from oslo.config import cfg
from oslo.serialization import jsonutils
from webob import exc
from nova.api.openstack.compute import extensions
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping_v1 as \
block_device_mapping
from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
from nova.api.openstack.compute import servers as servers_v2
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
CONF = cfg.CONF
class BlockDeviceMappingTestV21(test.TestCase):
validation_error = exception.ValidationError
def _setup_controller(self):
ext_info = plugins.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
'osapi_v3')
self.controller = servers_v3.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist',
['os-block-device-mapping-v1',
'os-block-device-mapping'],
'osapi_v3')
self.no_volumes_controller = servers_v3.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', '', 'osapi_v3')
def setUp(self):
super(BlockDeviceMappingTestV21, self).setUp()
self._setup_controller()
fake.stub_out_image_service(self.stubs)
self.volume_id = fakes.FAKE_UUID
self.bdm = [{
'no_device': None,
'virtual_name': 'root',
'volume_id': self.volume_id,
'device_name': 'vda',
'delete_on_termination': False
}]
def _get_servers_body(self, no_image=False):
body = {
'server': {
'name': 'server_test',
'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
},
},
}
if no_image:
del body['server']['imageRef']
return body
def _test_create(self, params, no_image=False, override_controller=None):
body = self._get_servers_body(no_image)
body['server'].update(params)
req = fakes.HTTPRequest.blank('/v2/fake/servers')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.body = jsonutils.dumps(body)
if override_controller:
override_controller.create(req, body=body).obj['server']
else:
self.controller.create(req, body=body).obj['server']
def test_create_instance_with_volumes_enabled(self):
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
"""Test that the create works if there is no image supplied but
os-volumes extension is enabled and bdms are supplied
"""
self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
volume = {
'id': 1,
'status': 'active',
'volume_image_metadata':
{'test_key': 'test_value'}
}
compute_api.API._validate_bdm(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(True)
compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
self.bdm,
True).AndReturn(volume)
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.mox.ReplayAll()
self._test_create(params, no_image=True)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
            self.assertNotIn('block_device_mapping', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
bdm = [{
'volume_id': self.volume_id,
'device_name': 'vda'
}]
params = {'block_device_mapping': bdm}
fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
self.assertRaises(exc.HTTPBadRequest,
self._test_create, params, no_image=True)
def test_create_instance_with_device_name_not_string(self):
self.bdm[0]['device_name'] = 123
old_create = compute_api.API.create
self.params = {'block_device_mapping': self.bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_snapshot_volume_id_none(self):
old_create = compute_api.API.create
bdm = [{
'no_device': None,
'snapshot_id': None,
'volume_id': None,
'device_name': 'vda',
'delete_on_termination': False
}]
self.params = {'block_device_mapping': bdm}
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, self.params)
@mock.patch.object(compute_api.API, 'create')
def test_create_instance_with_bdm_param_not_list(self, mock_create):
self.params = {'block_device_mapping': '/dev/vdb'}
self.assertRaises(self.validation_error,
self._test_create, self.params)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_device_name_too_long(self):
        self.bdm[0]['device_name'] = 'a' * 256
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_space_in_device_name(self):
        self.bdm[0]['device_name'] = 'vd a'
params = {'block_device_mapping': self.bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertTrue(kwargs['legacy_bdm'])
self.assertEqual(kwargs['block_device_mapping'], self.bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_invalid_size(self):
bdm = [{'delete_on_termination': True,
'device_name': 'vda',
'volume_size': "hello world",
'volume_id': '11111111-1111-1111-1111-111111111111'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['block_device_mapping'], bdm)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self.assertRaises(self.validation_error,
self._test_create, params)
def test_create_instance_with_bdm_delete_on_termination(self):
bdm = [{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'True'},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 'invalid'},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
expected_bdm = [
{'device_name': 'foo1', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo2', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True},
{'device_name': 'foo3', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo4', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False},
{'device_name': 'foo5', 'volume_id': fakes.FAKE_UUID,
'delete_on_termination': False}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
self._test_create(params)
def test_create_instance_decide_format_legacy(self):
ext_info = plugins.LoadedExtensionInfo()
CONF.set_override('extensions_blacklist',
['os-block-device-mapping',
'os-block-device-mapping-v1'],
'osapi_v3')
controller = servers_v3.ServersController(extension_info=ext_info)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': True}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
def test_create_instance_both_bdm_formats(self):
bdm = [{'device_name': 'foo'}]
bdm_v2 = [{'source_type': 'volume',
'uuid': 'fake_vol'}]
params = {'block_device_mapping': bdm,
'block_device_mapping_v2': bdm_v2}
self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
validation_error = exc.HTTPBadRequest
def _setup_controller(self):
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {'os-volumes': 'fake'}
self.controller = servers_v2.Controller(self.ext_mgr)
self.ext_mgr_no_vols = extensions.ExtensionManager()
self.ext_mgr_no_vols.extensions = {}
self.no_volumes_controller = servers_v2.Controller(
self.ext_mgr_no_vols)
def test_create_instance_with_volumes_disabled(self):
bdm = [{'device_name': 'foo'}]
params = {'block_device_mapping': bdm}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsNone(kwargs['block_device_mapping'])
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create(params,
override_controller=self.no_volumes_controller)
def test_create_instance_decide_format_legacy(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'}
controller = servers_v2.Controller(self.ext_mgr)
bdm = [{'device_name': 'foo1',
'volume_id': fakes.FAKE_UUID,
'delete_on_termination': 1}]
expected_legacy_flag = True
old_create = compute_api.API.create
def create(*args, **kwargs):
legacy_bdm = kwargs.get('legacy_bdm', True)
self.assertEqual(legacy_bdm, expected_legacy_flag)
return old_create(*args, **kwargs)
def _validate_bdm(*args, **kwargs):
pass
self.stubs.Set(compute_api.API, 'create', create)
self.stubs.Set(compute_api.API, '_validate_bdm',
_validate_bdm)
self._test_create({}, override_controller=controller)
params = {'block_device_mapping': bdm}
self._test_create(params, override_controller=controller)
|
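The class above exercises the legacy ("v1") block device mapping format, and its last test shows that mixing it with the v2 format in one request must fail with HTTPBadRequest. For reference, a sketch of the two request fragments as these tests shape them (the volume UUID is the placeholder literal already used in the tests):

    # legacy format: flat dicts keyed by device name
    legacy = {'block_device_mapping': [
        {'device_name': 'vda',
         'volume_id': '11111111-1111-1111-1111-111111111111',
         'delete_on_termination': False},
    ]}

    # v2 format: typed source/destination entries; supplying both keys
    # in a single server-create body is rejected
    v2 = {'block_device_mapping_v2': [
        {'source_type': 'volume', 'uuid': 'fake_vol'},
    ]}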
xijunlee/leetcode | refs/heads/master | 462.py | 1 | #!/usr/bin/env python
# coding=utf-8
class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        nums.sort()
        ret = 0
        # the median minimizes the total of absolute differences
        mid = len(nums) // 2
        for i in range(len(nums)):
            ret += abs(nums[i] - nums[mid])
        return ret |
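The sort-and-median approach works because the median minimizes the sum of absolute deviations; for even-length input, any value between the two middle elements gives the same total. A quick check of the solution above:

    s = Solution()
    print(s.minMoves2([1, 2, 3]))      # 2: move 1 up and 3 down to 2
    print(s.minMoves2([1, 10, 2, 9]))  # 16: |1-9| + |2-9| + |9-9| + |10-9|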
bdh1011/cupeye | refs/heads/master | venv/lib/python2.7/site-packages/passlib/handlers/windows.py | 21 | """passlib.handlers.nthash - Microsoft Windows -related hashes"""
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import to_unicode, right_pad_string
from passlib.utils.compat import b, bytes, str_to_uascii, u, unicode, uascii_to_str
from passlib.utils.md4 import md4
import passlib.utils.handlers as uh
# local
__all__ = [
"lmhash",
"nthash",
"bsd_nthash",
"msdcc",
"msdcc2",
]
#=============================================================================
# lanman hash
#=============================================================================
class lmhash(uh.HasEncodingContext, uh.StaticHandler):
"""This class implements the Lan Manager Password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.verify` methods accept a single
optional keyword:
:type encoding: str
:param encoding:
        This specifies what character encoding LMHASH should use when
        calculating the digest. It defaults to ``cp437``, the most
        common encoding encountered.
    Note that while this class outputs digests in lower-case hexadecimal,
    it will accept upper-case as well.
"""
#===================================================================
# class attrs
#===================================================================
name = "lmhash"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
default_encoding = "cp437"
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret, self.encoding)).decode("ascii")
# magic constant used by LMHASH
_magic = b("KGS!@#$%")
@classmethod
def raw(cls, secret, encoding=None):
"""encode password using LANMAN hash algorithm.
:type secret: unicode or utf-8 encoded bytes
:arg secret: secret to hash
:type encoding: str
:arg encoding:
optional encoding to use for unicode inputs.
this defaults to ``cp437``, which is the
common case for most situations.
:returns: returns string of raw bytes
"""
if not encoding:
encoding = cls.default_encoding
        # some nice empirical data re: different encodings is at...
# http://www.openwall.com/lists/john-dev/2011/08/01/2
# http://www.freerainbowtables.com/phpBB3/viewtopic.php?t=387&p=12163
from passlib.utils.des import des_encrypt_block
MAGIC = cls._magic
if isinstance(secret, unicode):
# perform uppercasing while we're still unicode,
# to give a better shot at getting non-ascii chars right.
# (though some codepages do NOT upper-case the same as unicode).
secret = secret.upper().encode(encoding)
elif isinstance(secret, bytes):
# FIXME: just trusting ascii upper will work?
# and if not, how to do codepage specific case conversion?
# we could decode first using <encoding>,
# but *that* might not always be right.
secret = secret.upper()
else:
raise TypeError("secret must be unicode or bytes")
secret = right_pad_string(secret, 14)
return des_encrypt_block(secret[0:7], MAGIC) + \
des_encrypt_block(secret[7:14], MAGIC)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# ntlm hash
#=============================================================================
class nthash(uh.StaticHandler):
"""This class implements the NT Password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept no optional keywords.
    Note that while this class outputs lower-case hexadecimal digests,
it will accept upper-case digests as well.
"""
#===================================================================
# class attrs
#===================================================================
name = "nthash"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret)).decode("ascii")
@classmethod
def raw(cls, secret):
"""encode password using MD4-based NTHASH algorithm
:arg secret: secret as unicode or utf-8 encoded bytes
:returns: returns string of raw bytes
"""
secret = to_unicode(secret, "utf-8", param="secret")
# XXX: found refs that say only first 128 chars are used.
return md4(secret.encode("utf-16-le")).digest()
@classmethod
def raw_nthash(cls, secret, hex=False):
warn("nthash.raw_nthash() is deprecated, and will be removed "
"in Passlib 1.8, please use nthash.raw() instead",
DeprecationWarning)
ret = nthash.raw(secret)
return hexlify(ret).decode("ascii") if hex else ret
#===================================================================
# eoc
#===================================================================
bsd_nthash = uh.PrefixWrapper("bsd_nthash", nthash, prefix="$3$$", ident="$3$$",
doc="""The class support FreeBSD's representation of NTHASH
(which is compatible with the :ref:`modular-crypt-format`),
and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept no optional keywords.
""")
##class ntlm_pair(object):
## "combined lmhash & nthash"
## name = "ntlm_pair"
## setting_kwds = ()
##    _hash_regex = re.compile(u"^(?P<lm>[0-9a-f]{32}):(?P<nt>[0-9a-f]{32})$",
## re.I)
##
## @classmethod
## def identify(cls, hash):
## hash = to_unicode(hash, "latin-1", "hash")
## return len(hash) == 65 and cls._hash_regex.match(hash) is not None
##
## @classmethod
## def genconfig(cls):
## return None
##
## @classmethod
## def genhash(cls, secret, config):
## if config is not None and not cls.identify(config):
## raise uh.exc.InvalidHashError(cls)
## return cls.encrypt(secret)
##
## @classmethod
## def encrypt(cls, secret):
## return lmhash.encrypt(secret) + ":" + nthash.encrypt(secret)
##
## @classmethod
## def verify(cls, secret, hash):
## hash = to_unicode(hash, "ascii", "hash")
## m = cls._hash_regex.match(hash)
## if not m:
## raise uh.exc.InvalidHashError(cls)
## lm, nt = m.group("lm", "nt")
## # NOTE: verify against both in case encoding issue
## # causes one not to match.
## return lmhash.verify(secret, lm) or nthash.verify(secret, nt)
#=============================================================================
# msdcc v1
#=============================================================================
class msdcc(uh.HasUserContext, uh.StaticHandler):
"""This class implements Microsoft's Domain Cached Credentials password hash,
and follows the :ref:`password-hash-api`.
It has a fixed number of rounds, and uses the associated
username as the salt.
The :meth:`~passlib.ifc.PasswordHash.encrypt`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods
have the following optional keywords:
:type user: str
:param user:
String containing name of user account this password is associated with.
This is required to properly calculate the hash.
This keyword is case-insensitive, and should contain just the username
(e.g. ``Administrator``, not ``SOMEDOMAIN\\Administrator``).
    Note that while this class outputs lower-case hexadecimal digests,
it will accept upper-case digests as well.
"""
name = "msdcc"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret, self.user)).decode("ascii")
@classmethod
def raw(cls, secret, user):
"""encode password using mscash v1 algorithm
:arg secret: secret as unicode or utf-8 encoded bytes
:arg user: username to use as salt
:returns: returns string of raw bytes
"""
secret = to_unicode(secret, "utf-8", param="secret").encode("utf-16-le")
user = to_unicode(user, "utf-8", param="user").lower().encode("utf-16-le")
return md4(md4(secret).digest() + user).digest()
#=============================================================================
# msdcc2 aka mscash2
#=============================================================================
class msdcc2(uh.HasUserContext, uh.StaticHandler):
"""This class implements version 2 of Microsoft's Domain Cached Credentials
password hash, and follows the :ref:`password-hash-api`.
It has a fixed number of rounds, and uses the associated
username as the salt.
The :meth:`~passlib.ifc.PasswordHash.encrypt`, :meth:`~passlib.ifc.PasswordHash.genhash`, and :meth:`~passlib.ifc.PasswordHash.verify` methods
have the following extra keyword:
:type user: str
:param user:
String containing name of user account this password is associated with.
This is required to properly calculate the hash.
This keyword is case-insensitive, and should contain just the username
(e.g. ``Administrator``, not ``SOMEDOMAIN\\Administrator``).
"""
name = "msdcc2"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret, self.user)).decode("ascii")
@classmethod
def raw(cls, secret, user):
"""encode password using msdcc v2 algorithm
:type secret: unicode or utf-8 bytes
:arg secret: secret
:type user: str
:arg user: username to use as salt
:returns: returns string of raw bytes
"""
from passlib.utils.pbkdf2 import pbkdf2
secret = to_unicode(secret, "utf-8", param="secret").encode("utf-16-le")
user = to_unicode(user, "utf-8", param="user").lower().encode("utf-16-le")
tmp = md4(md4(secret).digest() + user).digest()
return pbkdf2(tmp, user, 10240, 16, 'hmac-sha1')
#=============================================================================
# eof
#=============================================================================
|
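The msdcc2.raw() above reduces to PBKDF2-HMAC-SHA1 over an MD4 pre-hash. A standalone Python 3 sketch using only hashlib, assuming the interpreter's OpenSSL still exposes MD4 (hashlib.new('md4') raises on builds where it is disabled):

    import hashlib

    def msdcc2_raw(password, username, rounds=10240):
        # DCC2 = PBKDF2-HMAC-SHA1(MD4(MD4(P) + U), salt=U, rounds, dklen=16),
        # with P and U encoded UTF-16-LE and the username lower-cased first.
        pwd = password.encode('utf-16-le')
        user = username.lower().encode('utf-16-le')
        tmp = hashlib.new('md4', hashlib.new('md4', pwd).digest() + user).digest()
        return hashlib.pbkdf2_hmac('sha1', tmp, user, rounds, 16)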
shiblon/pytour | refs/heads/master | static/js/pypyjs/pypy-nojit.js-0.3.1/lib/modules/logging/config.py | 8 | # Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import cStringIO
import errno
import io
import logging
import logging.handlers
import os
import re
import socket
import struct
import sys
import traceback
import types
try:
import thread
import threading
except ImportError:
thread = None
from SocketServer import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
RESET_ERROR = errno.ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import ConfigParser
cp = ConfigParser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.readfp(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
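# Illustrative sketch: a minimal configuration file accepted by fileConfig(),
# using the section and option names parsed by the helpers below:
#
#     [loggers]
#     keys=root
#
#     [handlers]
#     keys=console
#
#     [formatters]
#     keys=plain
#
#     [logger_root]
#     level=INFO
#     handlers=console
#
#     [handler_console]
#     class=StreamHandler
#     level=INFO
#     formatter=plain
#     args=(sys.stderr,)
#
#     [formatter_plain]
#     format=%(levelname)s %(message)s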
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _encoded(s):
return s if isinstance(s, str) else s.encode('utf-8')
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp.get("formatters", "keys")
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
opts = cp.options(sectname)
if "format" in opts:
fs = cp.get(sectname, "format", 1)
else:
fs = None
if "datefmt" in opts:
dfs = cp.get(sectname, "datefmt", 1)
else:
dfs = None
c = logging.Formatter
if "class" in opts:
class_name = cp.get(sectname, "class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp.get("handlers", "keys")
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
sectname = "handler_%s" % hand
klass = cp.get(sectname, "class")
opts = cp.options(sectname)
if "formatter" in opts:
fmt = cp.get(sectname, "formatter")
else:
fmt = ""
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = cp.get(sectname, "args")
args = eval(args, vars(logging))
h = klass(*args)
if "level" in opts:
level = cp.get(sectname, "level")
h.setLevel(level)
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
if "target" in opts:
target = cp.get(sectname,"target")
else:
target = ""
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _install_loggers(cp, handlers, disable_existing_loggers):
"""Create and install loggers"""
# configure the root first
llist = cp.get("loggers", "keys")
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
sectname = "logger_root"
root = logging.root
log = root
opts = cp.options(sectname)
if "level" in opts:
level = cp.get(sectname, "level")
log.setLevel(level)
for h in root.handlers[:]:
root.removeHandler(h)
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
sectname = "logger_%s" % log
qn = cp.get(sectname, "qualname")
opts = cp.options(sectname)
if "propagate" in opts:
propagate = cp.getint(sectname, "propagate")
else:
propagate = 1
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in opts:
level = cp.get(sectname, "level")
logger.setLevel(level)
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = cp.get(sectname, "handlers")
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = 1
else:
logger.disabled = disable_existing_loggers
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
class ConvertingMixin(object):
"""For ConvertingXXX's, this mixin class provides common functions"""
def convert_with_key(self, key, value, replace=True):
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
if replace:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def convert(self, value):
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict, ConvertingMixin):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
return self.convert_with_key(key, value)
def get(self, key, default=None):
value = dict.get(self, key, default)
return self.convert_with_key(key, value)
def pop(self, key, default=None):
value = dict.pop(self, key, default)
return self.convert_with_key(key, value, replace=False)
class ConvertingList(list, ConvertingMixin):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
return self.convert_with_key(key, value)
def pop(self, idx=-1):
value = list.pop(self, idx)
return self.convert(value)
class ConvertingTuple(tuple, ConvertingMixin):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
# Can't replace a tuple entry.
return self.convert_with_key(key, value, replace=False)
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = __import__
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
# Issue 12718: winpdb replaces __import__ with a Python function, which
# ends up being treated as a bound method. To avoid problems, we
# set the importer on the instance, but leave it defined in the class
# so existing code doesn't break
if type(__import__) == types.FunctionType:
self.importer = __import__
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, basestring): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
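# Illustrative sketch of the two conversion protocols handled above:
#
#     'ext://sys.stderr'              -> resolve() imports sys, returns sys.stderr
#     'cfg://handlers.console'        -> config['handlers']['console']
#     'cfg://handlers.console[level]' -> that handler dict's 'level' entry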
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = root.manager.loggerDict.keys()
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
name = _encoded(name)
if name in existing:
i = existing.index(name)
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
i = i + 1 # look at the entry after name
while (i < num_existing) and\
(existing[i][:pflen] == prefixed):
child_loggers.append(existing[i])
i = i + 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config['class'] = cname # restore for deferred configuration
raise StandardError('target not configured yet')
config['target'] = th
except StandardError as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
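# A minimal usage sketch (illustrative values, not part of this module):
# the dict follows the schema configure() expects -- 'version' is required,
# and the formatter/handler names ('brief', 'console') are arbitrary keys.
#
# dictConfig({
#     'version': 1,
#     'formatters': {'brief': {'format': '%(levelname)s: %(message)s'}},
#     'handlers': {'console': {'class': 'logging.StreamHandler',
#                              'formatter': 'brief',
#                              'level': 'INFO'}},
#     'root': {'level': 'INFO', 'handlers': ['console']},
# })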
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread:
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
import tempfile
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
try:
import json
d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except Exception:
# Not valid JSON - fall back and apply the new
# configuration in the ini-style fileConfig() format.
file = cStringIO.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if e.errno != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
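# A minimal client sketch (illustrative, not part of this module) showing
# the wire format ConfigStreamHandler.handle() expects: a 4-byte big-endian
# length packed with struct.pack('>L', n), followed by the config bytes.
# The file name 'logging.conf' is an assumption.
#
# import socket, struct
# with open('logging.conf', 'rb') as f:
#     payload = f.read()
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect(('localhost', DEFAULT_LOGGING_CONFIG_PORT))
# s.sendall(struct.pack('>L', len(payload)) + payload)
# s.close()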
|
mapnik/python-mapnik | refs/heads/master | test/python_tests/mapnik_logger_test.py | 5 | #!/usr/bin/env python
from nose.tools import eq_
import mapnik
from .utilities import run_all
def test_logger_init():
eq_(mapnik.severity_type.Debug, 0)
eq_(mapnik.severity_type.Warn, 1)
eq_(mapnik.severity_type.Error, 2)
eq_(getattr(mapnik.severity_type, "None"), 3)
default = mapnik.logger.get_severity()
mapnik.logger.set_severity(mapnik.severity_type.Debug)
eq_(mapnik.logger.get_severity(), mapnik.severity_type.Debug)
mapnik.logger.set_severity(default)
eq_(mapnik.logger.get_severity(), default)
if __name__ == "__main__":
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
naritta/numpy | refs/heads/master | numpy/polynomial/tests/test_polynomial.py | 123 | """Tests for polynomial module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
def trim(x):
return poly.polytrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
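# (T0..T9 above are the Chebyshev polynomials of the first kind written in
# the power basis; test_polyfromroots below relies on this identification.)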
class TestConstants(TestCase):
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
def test_polyzero(self):
assert_equal(poly.polyzero, [0])
def test_polyone(self):
assert_equal(poly.polyone, [1])
def test_polyx(self):
assert_equal(poly.polyx, [0, 1])
class TestArithmetic(TestCase):
def test_polyadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = poly.polyadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polysub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = poly.polysub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polymulx(self):
assert_equal(poly.polymulx([0]), [0])
assert_equal(poly.polymulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i + 1) + [1]
assert_equal(poly.polymulx(ser), tgt)
def test_polymul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += 1
res = poly.polymul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_polydiv(self):
# check zero division
assert_raises(ZeroDivisionError, poly.polydiv, [1], [0])
# check scalar division
quo, rem = poly.polydiv([2], [2])
assert_equal((quo, rem), (1, 0))
quo, rem = poly.polydiv([2, 2], [2])
assert_equal((quo, rem), ((1, 1), 0))
# check rest.
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1, 2]
cj = [0]*j + [1, 2]
tgt = poly.polyadd(ci, cj)
quo, rem = poly.polydiv(tgt, ci)
res = poly.polyadd(poly.polymul(quo, ci), rem)
assert_equal(res, tgt, err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = poly.polyval(x, [1., 2., 3.])
def test_polyval(self):
#check empty input
assert_equal(poly.polyval([], [1]).size, 0)
#check normal input
x = np.linspace(-1, 1)
y = [x**i for i in range(5)]
for i in range(5):
tgt = y[i]
res = poly.polyval(x, [0]*i + [1])
assert_almost_equal(res, tgt)
tgt = x*(x**2 - 1)
res = poly.polyval(x, [0, -1, 0, 1])
assert_almost_equal(res, tgt)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(poly.polyval(x, [1]).shape, dims)
assert_equal(poly.polyval(x, [1, 0]).shape, dims)
assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
def test_polyval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, poly.polyval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = poly.polyval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = poly.polyval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_polyval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, poly.polyval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = poly.polyval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = poly.polyval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_polygrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = poly.polygrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = poly.polygrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_polygrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = poly.polygrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = poly.polygrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_polyint(self):
# check exceptions
assert_raises(ValueError, poly.polyint, [0], .5)
assert_raises(ValueError, poly.polyint, [0], -1)
assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = poly.polyint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
res = poly.polyint(pol, m=1, k=[i])
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
assert_almost_equal(poly.polyval(-1, res), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
res = poly.polyint(pol, m=1, k=[i], scl=2)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1)
res = poly.polyint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k])
res = poly.polyint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_polyint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T
res = poly.polyint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c) for c in c2d])
res = poly.polyint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyint(c, k=3) for c in c2d])
res = poly.polyint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_polyder(self):
# check exceptions
assert_raises(ValueError, poly.polyder, [0], .5)
assert_raises(ValueError, poly.polyder, [0], -1)
# check that zeroth deriviative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = poly.polyder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = poly.polyder(poly.polyint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_polyder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T
res = poly.polyder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([poly.polyder(c) for c in c2d])
res = poly.polyder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_polyvander(self):
# check for 1d x
x = np.arange(3)
v = poly.polyvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], poly.polyval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = poly.polyvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], poly.polyval(x, coef))
def test_polyvander2d(self):
# also tests polyval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = poly.polyvander2d(x1, x2, [1, 2])
tgt = poly.polyval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = poly.polyvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_polyvander3d(self):
# also tests polyval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = poly.polyvander3d(x1, x2, x3, [1, 2, 3])
tgt = poly.polyval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
assert_raises(ValueError, poly.polycompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(poly.polycompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
class TestMisc(TestCase):
def test_polyfromroots(self):
res = poly.polyfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = Tlist[i]
res = poly.polyfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res), trim(tgt))
def test_polyroots(self):
assert_almost_equal(poly.polyroots([1]), [])
assert_almost_equal(poly.polyroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = poly.polyroots(poly.polyfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_polyfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, poly.polyfit, [1], [1], -1)
assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
assert_raises(TypeError, poly.polyfit, [], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = poly.polyfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(poly.polyval(x, coef3), y)
#
coef4 = poly.polyfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(poly.polyval(x, coef4), y)
#
coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
wcoef3 = poly.polyfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
def test_polytrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, poly.polytrim, coef, -1)
# Test results
assert_equal(poly.polytrim(coef), coef[:-1])
assert_equal(poly.polytrim(coef, 1), coef[:-3])
assert_equal(poly.polytrim(coef, 2), [0])
def test_polyline(self):
assert_equal(poly.polyline(3, 4), [3, 4])
if __name__ == "__main__":
run_module_suite()
|
cytec/Sick-Beard | refs/heads/master | lib/sqlalchemy/dialects/mysql/pyodbc.py | 17 | # mysql/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database via the pyodbc adapter.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
----------
Connect string::
mysql+pyodbc://<username>:<password>@<dsnname>
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import base as engine_base
from sqlalchemy import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT LAST_INSERT_ID()")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
supports_unicode_statements = False
execution_ctx_cls = MySQLExecutionContext_pyodbc
pyodbc_driver_name = "MySQL"
def __init__(self, **kw):
# deal with http://code.google.com/p/pyodbc/issues/detail?id=25
kw.setdefault('convert_unicode', True)
super(MySQLDialect_pyodbc, self).__init__(**kw)
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If issuing that sort of SQL proves unworkable, this can be changed to
# prefer the driver's value instead.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
for key in ('character_set_connection', 'character_set'):
if opts.get(key, None):
return opts[key]
util.warn("Could not detect the connection character set. Assuming latin1.")
return 'latin1'
def _extract_error_code(self, exception):
m = re.compile(r"\((\d+)\)").search(str(exception.args))
# Guard against a non-matching pattern instead of calling group() on None.
if m:
return int(m.group(1))
else:
return None
dialect = MySQLDialect_pyodbc
|
Make-O-Matic/MOM-Platform | refs/heads/master | platform/views/errors.py | 1 | # Copyright (c) 2016-2017 Enproduktion GmbH & Laber's Lab e.U. (FN 394440i, Austria)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from flask import render_template
from flask.views import View
class ShowErrors(View):
"""View for Errors
This class handles everything regarding the errors route.
Returns
-------
the errors.html template
"""
# message = "Nothing to see here."
def dispatch_request(self, message="test"):
return render_template("errors.html", message=message)
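# A minimal registration sketch (illustrative; the app object and URL rules
# are assumptions, not part of this module):
#
# from flask import Flask
# app = Flask(__name__)
# app.add_url_rule('/errors', view_func=ShowErrors.as_view('errors'))
# app.add_url_rule('/errors/<message>',
#                  view_func=ShowErrors.as_view('errors_message'))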
|
juharris/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/operator_test_util.py | 10 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `OperatorPDBase` and related classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
import tensorflow as tf
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class OperatorPDDerivedClassTest(tf.test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def _compare_results(
self, expected, actual, static_shapes=True, atol=1e-5):
"""Compare expected value (array) to the actual value (Tensor)."""
if static_shapes:
self.assertEqual(expected.shape, actual.get_shape())
self.assertAllClose(expected, actual.eval(), atol=atol)
@abc.abstractmethod
def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator represents a (batch) matrix. This method returns both
together, and is used e.g. by tests.
Args:
batch_shape: List-like of Python integers giving batch shape of operator.
k: Python integer, the event size.
dtype: Numpy dtype. Data type of returned array/operator.
Returns:
operator: `OperatorPDBase` subclass.
mat: numpy array representing a (batch) matrix.
"""
# Create a matrix as a numpy array. Shape = batch_shape + [k, k].
# Create an OperatorPDDiag that should have the same behavior as the matrix.
# All arguments are convertable to numpy arrays.
#
batch_shape = list(batch_shape)
mat_shape = batch_shape + [k, k]
# return operator, mat
raise NotImplementedError("Not implemented yet.")
def testToDense(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
for dtype in [np.float32, np.float64]:
operator, mat = self._build_operator_and_mat(
batch_shape, k, dtype=dtype)
self._compare_results(
expected=mat,
actual=operator.to_dense())
def testSqrtToDense(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
sqrt = operator.sqrt_to_dense()
self.assertEqual(mat.shape, sqrt.get_shape())
# Square roots are not unique, but SS^T should equal mat. In this
# case however, we should have S = S^T.
self._compare_results(
expected=mat,
actual=tf.batch_matmul(sqrt, sqrt))
def testDeterminants(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
expected_det = tf.matrix_determinant(mat).eval()
self._compare_results(expected_det, operator.det())
self._compare_results(np.log(expected_det), operator.log_det())
def testMatmul(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.batch_matmul(mat, x).eval(),
actual=operator.matmul(x))
def testSqrtMatmul(self):
# Square roots are not unique, but we should have SS^T x = Ax, and in our
# case, we should have S = S^T, so SSx = Ax.
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.batch_matmul(mat, x).eval(),
actual=operator.sqrt_matmul(operator.sqrt_matmul(x)))
def testSolve(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.matrix_solve(mat, x).eval(), actual=operator.solve(x))
def testSqrtSolve(self):
# Square roots are not unique, but we should still have
# S^{-T} S^{-1} x = A^{-1} x.
# In our case, we should have S = S^T, so then S^{-1} S^{-1} x = A^{-1} x.
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
# Work with 5 simultaneous systems. 5 is arbitrary.
x = self._rng.randn(*(batch_shape + (k, 5)))
self._compare_results(
expected=tf.matrix_solve(mat, x).eval(),
actual=operator.sqrt_solve(operator.sqrt_solve(x)))
def testAddToTensor(self):
with self.test_session():
for batch_shape in [(), (2, 3,)]:
for k in [1, 4]:
operator, mat = self._build_operator_and_mat(batch_shape, k)
tensor = tf.ones_like(mat)
self._compare_results(
expected=(mat + tensor).eval(),
actual=operator.add_to_tensor(tensor))
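# A minimal subclass sketch (illustrative; the concrete operator class and
# its import path are assumptions -- any `OperatorPDBase` subclass wrapping
# a batch diagonal matrix would serve):
#
# class OperatorPDDiagTest(OperatorPDDerivedClassTest):
#
#   def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
#     # A strictly positive diagonal guarantees positive definiteness.
#     diag = self._rng.rand(*(list(batch_shape) + [k])).astype(dtype) + 1.
#     mat = diag[..., np.newaxis] * np.eye(k, dtype=dtype)
#     operator = operator_pd_diag.OperatorPDDiag(diag)  # assumed class
#     return operator, mat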
|
Tinche/cattrs | refs/heads/master | src/cattr/preconf/pyyaml.py | 1 | """Preconfigured converters for pyyaml."""
from datetime import datetime
from .._compat import FrozenSetSubscriptable
from ..converters import GenConverter
from . import validate_datetime
def configure_converter(converter: GenConverter):
"""
Configure the converter for use with the pyyaml library.
* frozensets are serialized as lists
* string enums are converted into strings explicitly
"""
converter.register_unstructure_hook(
str, lambda v: v if v.__class__ is str else v.value
)
converter.register_structure_hook(datetime, validate_datetime)
def make_converter(*args, **kwargs) -> GenConverter:
kwargs["unstruct_collection_overrides"] = {
**kwargs.get("unstruct_collection_overrides", {}),
FrozenSetSubscriptable: list,
}
res = GenConverter(*args, **kwargs)
configure_converter(res)
return res
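# A minimal usage sketch (illustrative; Color is an assumed str-based enum,
# not part of this module):
#
# import enum
#
# class Color(str, enum.Enum):
#     RED = "red"
#
# c = make_converter()
# c.unstructure(Color.RED)  # -> "red", via the str hook registered above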
|
person142/scipy | refs/heads/master | scipy/signal/tests/test_signaltools.py | 1 | # -*- coding: utf-8 -*-
import sys
from decimal import Decimal
from itertools import product
from math import gcd
import warnings
import pytest
from pytest import raises as assert_raises
from numpy.testing import (
assert_equal,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_warns, assert_array_less,
suppress_warnings)
from numpy import array, arange
import numpy as np
from scipy.fft import fft
from scipy.ndimage.filters import correlate1d
from scipy.optimize import fmin, linear_sum_assignment
from scipy import signal
from scipy.signal import (
correlate, convolve, convolve2d,
fftconvolve, oaconvolve, choose_conv_method,
hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos,
invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt,
sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue,
residuez)
from scipy.signal.windows import hann
from scipy.signal.signaltools import (_filtfilt_gust, _compute_factors,
_group_poles)
from scipy.signal._upfirdn import _upfirdn_modes
class _TestConvolve(object):
def test_basic(self):
a = [3, 4, 5, 6, 5, 4]
b = [1, 2, 3]
c = convolve(a, b)
assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))
def test_same(self):
a = [3, 4, 5]
b = [1, 2, 3, 4]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 34]))
def test_same_eq(self):
a = [3, 4, 5]
b = [1, 2, 3]
c = convolve(a, b, mode="same")
assert_array_equal(c, array([10, 22, 22]))
def test_complex(self):
x = array([1 + 1j, 2 + 1j, 3 + 1j])
y = array([1 + 1j, 2 + 1j])
z = convolve(x, y)
assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))
def test_zero_rank(self):
a = 1289
b = 4567
c = convolve(a, b)
assert_equal(c, a * b)
def test_broadcastable(self):
a = np.arange(27).reshape(3, 3, 3)
b = np.arange(3)
for i in range(3):
b_shape = [1]*3
b_shape[i] = 3
x = convolve(a, b.reshape(b_shape), method='direct')
y = convolve(a, b.reshape(b_shape), method='fft')
assert_allclose(x, y)
def test_single_element(self):
a = array([4967])
b = array([3920])
c = convolve(a, b)
assert_equal(c, a * b)
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve(a, b)
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
assert_array_equal(c, d)
def test_input_swapping(self):
small = arange(8).reshape(2, 2, 2)
big = 1j * arange(27).reshape(3, 3, 3)
big += arange(27)[::-1].reshape(3, 3, 3)
out_array = array(
[[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j],
[52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j],
[46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j],
[40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]],
[[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j],
[282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j],
[246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j],
[142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]],
[[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j],
[174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j],
[138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j],
[70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]],
[[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j],
[68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j],
[38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j],
[12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]])
assert_array_equal(convolve(small, big, 'full'), out_array)
assert_array_equal(convolve(big, small, 'full'), out_array)
assert_array_equal(convolve(small, big, 'same'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'same'),
out_array[0:3, 0:3, 0:3])
assert_array_equal(convolve(small, big, 'valid'),
out_array[1:3, 1:3, 1:3])
assert_array_equal(convolve(big, small, 'valid'),
out_array[1:3, 1:3, 1:3])
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, convolve, a, b, mode='spam')
assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft')
assert_raises(ValueError, convolve, a, b, mode='ham', method='direct')
assert_raises(ValueError, convolve, a, b, mode='full', method='bacon')
assert_raises(ValueError, convolve, a, b, mode='same', method='bacon')
class TestConvolve(_TestConvolve):
def test_valid_mode2(self):
# See gh-5897
a = [1, 2, 3, 6, 5, 3]
b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
expected = [70, 78, 73, 65]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
a = [1 + 5j, 2 - 1j, 3 + 0j]
b = [2 - 3j, 1 + 0j]
expected = [2 - 3j, 8 - 10j]
out = convolve(a, b, 'valid')
assert_array_equal(out, expected)
out = convolve(b, a, 'valid')
assert_array_equal(out, expected)
def test_same_mode(self):
a = [1, 2, 3, 3, 1, 2]
b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
c = convolve(a, b, 'same')
d = array([57, 61, 63, 57, 45, 36])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'})
def test_convolve_method(self, n=100):
types = sum([t for _, t in np.sctypes.items()], [])
types = {np.dtype(t).name for t in types}
# These types include 'bool' and all precisions (int8, float32, etc)
# The removed types throw errors in correlate or fftconvolve
for dtype in ['complex256', 'complex192', 'float128', 'float96',
'str', 'void', 'bytes', 'object', 'unicode', 'string']:
if dtype in types:
types.remove(dtype)
args = [(t1, t2, mode) for t1 in types for t2 in types
for mode in ['valid', 'full', 'same']]
# These are random arrays, which makes this test much stronger than
# simply convolving two np.ones arrays.
np.random.seed(42)
array_types = {'i': np.random.choice([0, 1], size=n),
'f': np.random.randn(n)}
array_types['b'] = array_types['u'] = array_types['i']
array_types['c'] = array_types['f'] + 0.5j*array_types['f']
for t1, t2, mode in args:
x1 = array_types[np.dtype(t1).kind].astype(t1)
x2 = array_types[np.dtype(t2).kind].astype(t2)
results = {key: convolve(x1, x2, method=key, mode=mode)
for key in ['fft', 'direct']}
assert_equal(results['fft'].dtype, results['direct'].dtype)
if 'bool' in t1 and 'bool' in t2:
assert_equal(choose_conv_method(x1, x2), 'direct')
continue
# Tolerances found by experiment: approximately the smallest
# (rtol, atol) values for which these tests pass.
if any([t in {'complex64', 'float32'} for t in [t1, t2]]):
kwargs = {'rtol': 1.0e-4, 'atol': 1e-6}
elif 'float16' in [t1, t2]:
# atol is default for np.allclose
kwargs = {'rtol': 1e-3, 'atol': 1e-3}
else:
# defaults for np.allclose (different from assert_allclose)
kwargs = {'rtol': 1e-5, 'atol': 1e-8}
assert_allclose(results['fft'], results['direct'], **kwargs)
def test_convolve_method_large_input(self):
# This is really a test that convolving two large integers requires the
# direct method, since the fft method loses integer precision.
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]:
z = np.array([2**n], dtype=np.int64)
fft = convolve(z, z, method='fft')
direct = convolve(z, z, method='direct')
# this is the case when integer precision gets to us
# issue #6076 has more detail, hopefully more tests after resolved
if n < 50:
assert_equal(fft, direct)
assert_equal(fft, 2**(2*n))
assert_equal(direct, 2**(2*n))
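# An illustrative sketch of the precision boundary referenced above (the
# values are assumptions): float64 carries 52 mantissa bits, so products
# exceeding ~2**52 make choose_conv_method prefer the exact direct method.
#
# z = np.array([2**53], dtype=np.int64)
# choose_conv_method(z, z)  # -> 'direct' for large-integer inputs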
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, convolve, [1], 2, method='direct')
assert_raises(ValueError, convolve, 1, [2], method='direct')
assert_raises(ValueError, convolve, [1], 2, method='fft')
assert_raises(ValueError, convolve, 1, [2], method='fft')
assert_raises(ValueError, convolve, [1], [[2]])
assert_raises(ValueError, convolve, [3], 2)
class _TestConvolve2d(object):
def test_2d_arrays(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
d = array([[2, 7, 16, 17, 12],
[10, 30, 62, 58, 38],
[12, 31, 58, 49, 30]])
e = convolve2d(a, b)
assert_array_equal(e, d)
def test_valid_mode(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = [[1, 2, 3], [3, 4, 5]]
h = array([[62, 80, 98, 116, 134]])
g = convolve2d(e, f, 'valid')
assert_array_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_valid_mode_complx(self):
e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j
h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]])
g = convolve2d(e, f, 'valid')
assert_array_almost_equal(g, h)
# See gh-5897
g = convolve2d(f, e, 'valid')
assert_array_equal(g, h)
def test_fillvalue(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
fillval = 1
c = convolve2d(a, b, 'full', 'fill', fillval)
d = array([[24, 26, 31, 34, 32],
[28, 40, 62, 64, 52],
[32, 46, 67, 62, 48]])
assert_array_equal(c, d)
def test_fillvalue_deprecations(self):
# Deprecated 2017-07, scipy version 1.0.0
with suppress_warnings() as sup:
sup.filter(np.ComplexWarning, "Casting complex values to real")
r = sup.record(DeprecationWarning, "could not cast `fillvalue`")
convolve2d([[1]], [[1, 2]], fillvalue=1j)
assert_(len(r) == 1)
warnings.filterwarnings(
"error", message="could not cast `fillvalue`",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=1j)
with suppress_warnings():
warnings.filterwarnings(
"always", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_warns(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
warnings.filterwarnings(
"error", message="`fillvalue` must be scalar or an array ",
category=DeprecationWarning)
assert_raises(DeprecationWarning, convolve2d, [[1]], [[1, 2]],
fillvalue=[1, 2])
def test_fillvalue_empty(self):
# Check that fillvalue being empty raises an error:
assert_raises(ValueError, convolve2d, [[1]], [[1, 2]],
fillvalue=[])
def test_wrap_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'wrap')
d = array([[80, 80, 74, 80, 80],
[68, 68, 62, 68, 68],
[80, 80, 74, 80, 80]])
assert_array_equal(c, d)
def test_sym_boundary(self):
a = [[1, 2, 3], [3, 4, 5]]
b = [[2, 3, 4], [4, 5, 6]]
c = convolve2d(a, b, 'full', 'symm')
d = array([[34, 30, 44, 62, 66],
[52, 48, 62, 80, 84],
[82, 78, 92, 110, 114]])
assert_array_equal(c, d)
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'})
class TestConvolve2d(_TestConvolve2d):
def test_same_mode(self):
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
g = convolve2d(e, f, 'same')
h = array([[22, 28, 34],
[80, 98, 116]])
assert_array_equal(g, h)
def test_valid_mode2(self):
# See gh-5897
e = [[1, 2, 3], [3, 4, 5]]
f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
expected = [[62, 80, 98, 116, 134]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]]
f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]]
expected = [[27 - 1j, 46. + 2j]]
out = convolve2d(e, f, 'valid')
assert_array_equal(out, expected)
# See gh-5897
out = convolve2d(f, e, 'valid')
assert_array_equal(out, expected)
def test_consistency_convolve_funcs(self):
# Compare np.convolve, signal.convolve, signal.convolve2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.convolve(a, b, mode=mode),
signal.convolve(a, b, mode=mode))
assert_almost_equal(np.squeeze(
signal.convolve2d([a], [b], mode=mode)),
signal.convolve(a, b, mode=mode))
def test_invalid_dims(self):
assert_raises(ValueError, convolve2d, 3, 4)
assert_raises(ValueError, convolve2d, [3], [4])
assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]])
class TestFFTConvolve(object):
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_real_axes(self, axes):
a = array([1, 2, 3])
expected = array([1, 4, 10, 12, 9.])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_complex(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_complex_axes(self, axes):
a = array([1 + 1j, 2 + 2j, 3 + 3j])
expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
a = np.tile(a, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same_axes(self, axes):
a = array([[1, 2, 3],
[4, 5, 6]])
expected = array([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
if axes == '':
out = fftconvolve(a, a)
else:
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same_axes(self, axes):
a = array([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = array([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
a = np.tile(a, [2, 1, 1])
expected = np.tile(expected, [2, 1, 1])
out = fftconvolve(a, a, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real_same_mode(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
if axes == '':
out = fftconvolve(a, b, 'same')
else:
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
if axes == '':
out = fftconvolve(b, a, 'same')
else:
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', [1, -1, [1], [-1]])
def test_real_same_mode_axes(self, axes):
a = array([1, 2, 3])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = array([35., 41., 47.])
expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected_1 = np.tile(expected_1, [2, 1])
expected_2 = np.tile(expected_2, [2, 1])
out = fftconvolve(a, b, 'same', axes=axes)
assert_array_almost_equal(out, expected_1)
out = fftconvolve(b, a, 'same', axes=axes)
assert_array_almost_equal(out, expected_2)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_real(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1]])
def test_valid_mode_real_axes(self, axes):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_complex(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_valid_mode_complex_axes(self, axes):
a = array([3 - 1j, 2 + 7j, 1 + 0j])
b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
out = fftconvolve(b, a, 'valid', axes=axes)
assert_array_almost_equal(out, expected)
def test_valid_mode_ignore_nonaxes(self):
# See gh-5897
a = array([3, 2, 1])
b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = array([24., 31., 41., 43., 49., 25., 12.])
a = np.tile(a, [2, 1])
b = np.tile(b, [1, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'valid', axes=1)
assert_array_almost_equal(out, expected)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(fftconvolve([], []).size == 0)
assert_(fftconvolve([5, 6], []).size == 0)
assert_(fftconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
out = fftconvolve(a, b)
assert_equal(out, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
out = fftconvolve(a, b)
assert_equal(out, a * b)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_random_data(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
if axes == '':
out = fftconvolve(a, b, 'full')
else:
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_random_data_axes(self, axes):
np.random.seed(1234)
a = np.random.rand(1233) + 1j * np.random.rand(1233)
b = np.random.rand(1321) + 1j * np.random.rand(1321)
expected = np.convolve(a, b, 'full')
a = np.tile(a, [2, 1])
b = np.tile(b, [2, 1])
expected = np.tile(expected, [2, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_(np.allclose(out, expected, rtol=1e-10))
@pytest.mark.parametrize('axes', [[1, 4],
[4, 1],
[1, -1],
[-1, 1],
[-4, 4],
[4, -4],
[-4, -1],
[-1, -4]])
def test_random_data_multidim_axes(self, axes):
a_shape, b_shape = (123, 22), (132, 11)
np.random.seed(1234)
a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape)
b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape)
expected = convolve2d(a, b, 'full')
a = a[:, :, None, None, None]
b = b[:, :, None, None, None]
expected = expected[:, :, None, None, None]
a = np.rollaxis(a.swapaxes(0, 2), 1, 5)
b = np.rollaxis(b.swapaxes(0, 2), 1, 5)
expected = np.rollaxis(expected.swapaxes(0, 2), 1, 5)
# use 1 for dimension 2 in a and 3 in b to test broadcasting
a = np.tile(a, [2, 1, 3, 1, 1])
b = np.tile(b, [2, 1, 1, 4, 1])
expected = np.tile(expected, [2, 1, 3, 4, 1])
out = fftconvolve(a, b, 'full', axes=axes)
assert_allclose(out, expected, rtol=1e-10, atol=1e-10)
@pytest.mark.slow
@pytest.mark.parametrize(
'n',
list(range(1, 100)) +
list(range(1000, 1500)) +
np.random.RandomState(1234).randint(1001, 10000, 5).tolist())
def test_many_sizes(self, n):
a = np.random.rand(n) + 1j * np.random.rand(n)
b = np.random.rand(n) + 1j * np.random.rand(n)
expected = np.convolve(a, b, 'full')
out = fftconvolve(a, b, 'full')
assert_allclose(out, expected, atol=1e-10)
out = fftconvolve(a, b, 'full', axes=[0])
assert_allclose(out, expected, atol=1e-10)
def fftconvolve_err(*args, **kwargs):
raise RuntimeError('Fell back to fftconvolve')
def gen_oa_shapes(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if abs(a - b) > 3]
def gen_oa_shapes_2d(sizes):
shapes0 = gen_oa_shapes(sizes)
shapes1 = gen_oa_shapes(sizes)
shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in
zip(shapes0, shapes1)]
modes = ['full', 'valid', 'same']
return [ishapes+(imode,) for ishapes, imode in product(shapes, modes)
if imode != 'valid' or
(ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or
(ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])]
def gen_oa_shapes_eq(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if a >= b]
class TestOAConvolve(object):
@pytest.mark.slow()
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes_eq(list(range(100)) +
list(range(100, 1000, 23)))
)
def test_real_manylens(self, shape_a_0, shape_b_0):
a = np.random.rand(shape_a_0)
b = np.random.rand(shape_b_0)
expected = fftconvolve(a, b)
out = oaconvolve(a, b)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes([50, 47, 6, 4]))
@pytest.mark.parametrize('is_complex', [True, False])
@pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
def test_1d_noaxes(self, shape_a_0, shape_b_0,
is_complex, mode, monkeypatch):
a = np.random.rand(shape_a_0)
b = np.random.rand(shape_b_0)
if is_complex:
a = a + 1j*np.random.rand(shape_a_0)
b = b + 1j*np.random.rand(shape_b_0)
expected = fftconvolve(a, b, mode=mode)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [0, 1])
@pytest.mark.parametrize('shape_a_0, shape_b_0',
gen_oa_shapes([50, 47, 6, 4]))
@pytest.mark.parametrize('shape_a_extra', [1, 3])
@pytest.mark.parametrize('shape_b_extra', [1, 3])
@pytest.mark.parametrize('is_complex', [True, False])
@pytest.mark.parametrize('mode', ['full', 'valid', 'same'])
def test_1d_axes(self, axes, shape_a_0, shape_b_0,
shape_a_extra, shape_b_extra,
is_complex, mode, monkeypatch):
ax_a = [shape_a_extra]*2
ax_b = [shape_b_extra]*2
ax_a[axes] = shape_a_0
ax_b[axes] = shape_b_0
a = np.random.rand(*ax_a)
b = np.random.rand(*ax_b)
if is_complex:
a = a + 1j*np.random.rand(*ax_a)
b = b + 1j*np.random.rand(*ax_b)
expected = fftconvolve(a, b, mode=mode, axes=axes)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode, axes=axes)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('shape_a_0, shape_b_0, '
'shape_a_1, shape_b_1, mode',
gen_oa_shapes_2d([50, 47, 6, 4]))
@pytest.mark.parametrize('is_complex', [True, False])
def test_2d_noaxes(self, shape_a_0, shape_b_0,
shape_a_1, shape_b_1, mode,
is_complex, monkeypatch):
a = np.random.rand(shape_a_0, shape_a_1)
b = np.random.rand(shape_b_0, shape_b_1)
if is_complex:
a = a + 1j*np.random.rand(shape_a_0, shape_a_1)
b = b + 1j*np.random.rand(shape_b_0, shape_b_1)
expected = fftconvolve(a, b, mode=mode)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]])
@pytest.mark.parametrize('shape_a_0, shape_b_0, '
'shape_a_1, shape_b_1, mode',
gen_oa_shapes_2d([50, 47, 6, 4]))
@pytest.mark.parametrize('shape_a_extra', [1, 3])
@pytest.mark.parametrize('shape_b_extra', [1, 3])
@pytest.mark.parametrize('is_complex', [True, False])
def test_2d_axes(self, axes, shape_a_0, shape_b_0,
shape_a_1, shape_b_1, mode,
shape_a_extra, shape_b_extra,
is_complex, monkeypatch):
ax_a = [shape_a_extra]*3
ax_b = [shape_b_extra]*3
ax_a[axes[0]] = shape_a_0
ax_b[axes[0]] = shape_b_0
ax_a[axes[1]] = shape_a_1
ax_b[axes[1]] = shape_b_1
a = np.random.rand(*ax_a)
b = np.random.rand(*ax_b)
if is_complex:
a = a + 1j*np.random.rand(*ax_a)
b = b + 1j*np.random.rand(*ax_b)
expected = fftconvolve(a, b, mode=mode, axes=axes)
monkeypatch.setattr(signal.signaltools, 'fftconvolve',
fftconvolve_err)
out = oaconvolve(a, b, mode=mode, axes=axes)
assert_array_almost_equal(out, expected)
def test_empty(self):
# Regression test for #1745: crashes with 0-length input.
assert_(oaconvolve([], []).size == 0)
assert_(oaconvolve([5, 6], []).size == 0)
assert_(oaconvolve([], [7]).size == 0)
def test_zero_rank(self):
a = array(4967)
b = array(3920)
out = oaconvolve(a, b)
assert_equal(out, a * b)
def test_single_element(self):
a = array([4967])
b = array([3920])
out = oaconvolve(a, b)
assert_equal(out, a * b)
class TestAllFreqConvolves(object):
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_shapes(self, convapproach):
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
with assert_raises(ValueError,
match="For 'valid' mode, one must be at least "
"as large as the other in every dimension"):
convapproach(a, b, mode='valid')
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_shapes_axes(self, convapproach):
a = np.zeros([5, 6, 2, 1])
b = np.zeros([5, 6, 3, 1])
with assert_raises(ValueError,
match=r"incompatible shapes for in1 and in2:"
r" \(5L?, 6L?, 2L?, 1L?\) and"
r" \(5L?, 6L?, 3L?, 1L?\)"):
convapproach(a, b, axes=[0, 1])
@pytest.mark.parametrize('a,b',
[([1], 2),
(1, [2]),
([3], [[2]])])
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_mismatched_dims(self, a, b, convapproach):
with assert_raises(ValueError,
match="in1 and in2 should have the same"
" dimensionality"):
convapproach(a, b)
@pytest.mark.parametrize('convapproach',
[fftconvolve, oaconvolve])
def test_invalid_flags(self, convapproach):
with assert_raises(ValueError,
match="acceptable mode flags are 'valid',"
" 'same', or 'full'"):
convapproach([1], [2], mode='chips')
with assert_raises(ValueError,
match="when provided, axes cannot be empty"):
convapproach([1], [2], axes=[])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
convapproach([1], [2], axes=[[1, 2], [3, 4]])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
convapproach([1], [2], axes=[1., 2., 3., 4.])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
convapproach([1], [2], axes=[1])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
convapproach([1], [2], axes=[-2])
with assert_raises(ValueError,
match="all axes must be unique"):
convapproach([1], [2], axes=[0, 0])
@pytest.mark.parametrize('dtype', [np.longfloat, np.longcomplex])
def test_longdtype_input(self, dtype):
x = np.random.random((27, 27)).astype(dtype)
y = np.random.random((4, 4)).astype(dtype)
if np.iscomplexobj(dtype()):
x += .1j
y -= .1j
res = fftconvolve(x, y)
assert_allclose(res, convolve(x, y, method='direct'))
assert res.dtype == dtype
class TestMedFilt(object):
def test_basic(self):
f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
[50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
[50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
[50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
[50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
[70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
[64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
[3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
[7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
[32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]
d = signal.medfilt(f, [7, 3])
e = signal.medfilt2d(np.array(f, float), [7, 3])
assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
[0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
[50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
[50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
[50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
[33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
[32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
[7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
[0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
[0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
assert_array_equal(d, e)
def test_none(self):
# Ticket #1124. Ensure this does not segfault.
with pytest.warns(UserWarning):
signal.medfilt(None)
# Expand on this test to avoid a regression with possible contiguous
# numpy arrays that have odd strides. The stride value below gets
# us into wrong memory if used (but it does not need to be used)
dummy = np.arange(10, dtype=np.float64)
a = dummy[5:6]
a.strides = 16
assert_(signal.medfilt(a, 1) == 5.)
def test_refcounting(self):
# Check a refcounting-related crash
a = Decimal(123)
x = np.array([a, a], dtype=object)
if hasattr(sys, 'getrefcount'):
n = 2 * sys.getrefcount(a)
else:
n = 10
# Shouldn't segfault:
with pytest.warns(UserWarning):
for j in range(n):
signal.medfilt(x)
if hasattr(sys, 'getrefcount'):
assert_(sys.getrefcount(a) < n)
assert_equal(x, [a, a])
class TestWiener(object):
def test_basic(self):
g = array([[5, 6, 4, 3],
[3, 5, 6, 2],
[2, 3, 5, 6],
[1, 6, 9, 7]], 'd')
h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
assert_array_almost_equal(signal.wiener(g), h, decimal=6)
assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6)
padtype_options = ["mean", "median", "minimum", "maximum", "line"]
padtype_options += _upfirdn_modes
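# Padding modes exercised by the resample_poly tests below: the
# statistics-based edge paddings plus every extension mode that upfirdn
# supports.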
class TestResample(object):
def test_basic(self):
# Some basic tests
# Regression test for issue #3603.
        # window.shape[0] must equal sig.shape[0]
sig = np.arange(128)
num = 256
win = signal.get_window(('kaiser', 8.0), 160)
assert_raises(ValueError, signal.resample, sig, num, window=win)
# Other degenerate conditions
assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1)
assert_raises(ValueError, signal.resample_poly, sig, 1, 0)
assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='')
assert_raises(ValueError, signal.resample_poly, sig, 2, 1,
padtype='mean', cval=10)
        # test for issue #6505 - should not modify window.shape when axis != 0
sig2 = np.tile(np.arange(160), (2, 1))
signal.resample(sig2, num, axis=-1, window=win)
assert_(win.shape == (160,))
@pytest.mark.parametrize('window', (None, 'hamming'))
@pytest.mark.parametrize('N', (20, 19))
@pytest.mark.parametrize('num', (100, 101, 10, 11))
def test_rfft(self, N, num, window):
        # Make sure the speedup using rfft gives the same result as the
        # normal approach using fft
x = np.linspace(0, 10, N, endpoint=False)
y = np.cos(-x**2/6.0)
assert_allclose(signal.resample(y, num, window=window),
signal.resample(y + 0j, num, window=window).real)
y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)])
y_complex = y + 0j
assert_allclose(
signal.resample(y, num, axis=1, window=window),
signal.resample(y_complex, num, axis=1, window=window).real,
atol=1e-9)
def test_input_domain(self):
# Test if both input domain modes produce the same results.
tsig = np.arange(256) + 0j
fsig = fft(tsig)
num = 256
assert_allclose(
signal.resample(fsig, num, domain='freq'),
signal.resample(tsig, num, domain='time'),
atol=1e-9)
@pytest.mark.parametrize('nx', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('ny', (1, 2, 3, 5, 8))
@pytest.mark.parametrize('dtype', ('float', 'complex'))
def test_dc(self, nx, ny, dtype):
x = np.array([1] * nx, dtype)
y = signal.resample(x, ny)
assert_allclose(y, [1] * ny)
@pytest.mark.parametrize('padtype', padtype_options)
def test_mutable_window(self, padtype):
# Test that a mutable window is not modified
impulse = np.zeros(3)
window = np.random.RandomState(0).randn(2)
window_orig = window.copy()
signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype)
assert_array_equal(window, window_orig)
@pytest.mark.parametrize('padtype', padtype_options)
def test_output_float32(self, padtype):
# Test that float32 inputs yield a float32 output
x = np.arange(10, dtype=np.float32)
h = np.array([1, 1, 1], dtype=np.float32)
y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype)
        assert y.dtype == np.float32
@pytest.mark.parametrize(
"method, ext, padtype",
[("fft", False, None)]
+ list(
product(
["polyphase"], [False, True], padtype_options,
)
),
)
def test_resample_methods(self, method, ext, padtype):
# Test resampling of sinusoids and random noise (1-sec)
rate = 100
rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201]
# Sinusoids, windowed to avoid edge artifacts
t = np.arange(rate) / float(rate)
freqs = np.array((1., 10., 40.))[:, np.newaxis]
x = np.sin(2 * np.pi * freqs * t) * hann(rate)
for rate_to in rates_to:
t_to = np.arange(rate_to) / float(rate_to)
y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to)
if method == 'fft':
y_resamps = signal.resample(x, rate_to, axis=-1)
else:
if ext and rate_to != rate:
# Match default window design
g = gcd(rate_to, rate)
up = rate_to // g
down = rate // g
max_rate = max(up, down)
f_c = 1. / max_rate
half_len = 10 * max_rate
window = signal.firwin(2 * half_len + 1, f_c,
window=('kaiser', 5.0))
polyargs = {'window': window, 'padtype': padtype}
else:
polyargs = {'padtype': padtype}
y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1,
**polyargs)
for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs):
if freq >= 0.5 * rate_to:
y_to.fill(0.) # mostly low-passed away
if padtype in ['minimum', 'maximum']:
assert_allclose(y_resamp, y_to, atol=3e-1)
else:
assert_allclose(y_resamp, y_to, atol=1e-3)
else:
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=(corr, rate, rate_to))
# Random data
rng = np.random.RandomState(0)
        x = hann(rate) * np.cumsum(rng.randn(rate))  # low-passed, windowed
for rate_to in rates_to:
# random data
t_to = np.arange(rate_to) / float(rate_to)
y_to = np.interp(t_to, t, x)
if method == 'fft':
y_resamp = signal.resample(x, rate_to)
else:
y_resamp = signal.resample_poly(x, rate_to, rate,
padtype=padtype)
assert_array_equal(y_to.shape, y_resamp.shape)
corr = np.corrcoef(y_to, y_resamp)[0, 1]
assert_(corr > 0.99, msg=corr)
# More tests of fft method (Master 0.18.1 fails these)
if method == 'fft':
x1 = np.array([1.+0.j, 0.+0.j])
y1_test = signal.resample(x1, 4)
# upsampling a complex array
y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j])
assert_allclose(y1_test, y1_true, atol=1e-12)
x2 = np.array([1., 0.5, 0., 0.5])
y2_test = signal.resample(x2, 2) # downsampling a real array
y2_true = np.array([1., 0.])
assert_allclose(y2_test, y2_true, atol=1e-12)
def test_poly_vs_filtfilt(self):
# Check that up=1.0 gives same answer as filtfilt + slicing
random_state = np.random.RandomState(17)
try_types = (int, np.float32, np.complex64, float, complex)
size = 10000
down_factors = [2, 11, 79]
for dtype in try_types:
x = random_state.randn(size).astype(dtype)
if dtype in (np.complex64, np.complex128):
x += 1j * random_state.randn(size)
            # resample_poly assumes zeros outside of the signal, whereas
            # filtfilt can only constant-pad. Make them equivalent:
x[0] = 0
x[-1] = 0
for down in down_factors:
h = signal.firwin(31, 1. / down, window='hamming')
yf = filtfilt(h, 1.0, x, padtype='constant')[::down]
# Need to pass convolved version of filter to resample_poly,
# since filtfilt does forward and backward, but resample_poly
# only goes forward
hc = convolve(h, h[::-1])
y = signal.resample_poly(x, 1, down, window=hc)
assert_allclose(yf, y, atol=1e-7, rtol=1e-7)
def test_correlate1d(self):
for down in [2, 4]:
for nx in range(1, 40, down):
for nweights in (32, 33):
x = np.random.random((nx,))
weights = np.random.random((nweights,))
y_g = correlate1d(x, weights[::-1], mode='constant')
y_s = signal.resample_poly(
x, up=1, down=down, window=weights)
assert_allclose(y_g[::down], y_s)
class TestCSpline1DEval(object):
def test_basic(self):
y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
x = arange(len(y))
dx = x[1] - x[0]
cj = signal.cspline1d(y)
x2 = arange(len(y) * 10.0) / 10.0
y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0])
# make sure interpolated values are on knot points
assert_array_almost_equal(y2[::10], y, decimal=5)
def test_complex(self):
# create some smoothly varying complex signal to interpolate
x = np.arange(2)
y = np.zeros(x.shape, dtype=np.complex64)
T = 10.0
f = 1.0 / T
y = np.exp(2.0J * np.pi * f * x)
# get the cspline transform
cy = signal.cspline1d(y)
# determine new test x value and interpolate
xnew = np.array([0.5])
ynew = signal.cspline1d_eval(cy, xnew)
assert_equal(ynew.dtype, y.dtype)
class TestOrderFilt(object):
def test_basic(self):
assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1),
[2, 3, 2])
class _TestLinearFilter(object):
def generate(self, shape):
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
return self.convert_dtype(x)
def convert_dtype(self, arr):
if self.dtype == np.dtype('O'):
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
            it = np.nditer([arr, out], ['refs_ok', 'zerosize_ok'],
                           [['readonly'], ['writeonly']])
            for x, y in it:
                y[...] = self.type(x[()])
return out
else:
return np.array(arr, self.dtype, copy=False)
def test_rank_1_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, -0.5])
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.])
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_IIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([0.5, -0.5])
zi = self.convert_dtype([1, 2])
y_r = self.convert_dtype([1, 5, 9, 13, 17, 21])
zf_r = self.convert_dtype([13, -10])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_1_FIR_init_cond(self):
x = self.generate((6,))
b = self.convert_dtype([1, 1, 1])
a = self.convert_dtype([1])
zi = self.convert_dtype([1, 1])
y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.])
zf_r = self.convert_dtype([9, 5])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_0(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
[6, 4, 2]])
y = lfilter(b, a, x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
def test_rank_2_IIR_axis_1(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]])
y = lfilter(b, a, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank_2_IIR_axis_0_init_cond(self):
x = self.generate((4, 3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((4,1)))
y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]])
zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis]
y, zf = lfilter(b, a, x, axis=1, zi=zi)
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_1_init_cond(self):
x = self.generate((4,3))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
zi = self.convert_dtype(np.ones((1,3)))
y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
[1, 3, 5], [5, 3, 1]])
zf_r = self.convert_dtype([[-23, -23, -23]])
y, zf = lfilter(b, a, x, axis=0, zi=zi)
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_IIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_IIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, -1])
a = self.convert_dtype([0.5, 0.5])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 1
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_FIR(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x)
assert_array_almost_equal(y, y_r)
def test_rank_3_FIR_init_cond(self):
x = self.generate((4, 3, 2))
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 2
zi = self.convert_dtype(np.ones(zi_shape))
zi1 = self.convert_dtype([1, 1])
y, zf = lfilter(b, a, x, axis, zi)
lf0 = lambda w: lfilter(b, a, w, zi=zi1)[0]
lf1 = lambda w: lfilter(b, a, w, zi=zi1)[1]
y_r = np.apply_along_axis(lf0, axis, x)
zf_r = np.apply_along_axis(lf1, axis, x)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_zi_pseudobroadcast(self):
x = self.generate((4, 5, 20))
b,a = signal.butter(8, 0.2, output='ba')
b = self.convert_dtype(b)
a = self.convert_dtype(a)
zi_size = b.shape[0] - 1
# lfilter requires x.ndim == zi.ndim exactly. However, zi can have
# length 1 dimensions.
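        # e.g. a zi of shape (1, 1, n) is expanded to (4, 5, n) here.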
zi_full = self.convert_dtype(np.ones((4, 5, zi_size)))
zi_sing = self.convert_dtype(np.ones((1, 1, zi_size)))
y_full, zf_full = lfilter(b, a, x, zi=zi_full)
y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
assert_array_almost_equal(y_sing, y_full)
assert_array_almost_equal(zf_full, zf_sing)
# lfilter does not prepend ones
assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size))
def test_scalar_a(self):
# a can be a scalar.
x = self.generate(6)
b = self.convert_dtype([1, 0, -1])
a = self.convert_dtype([1])
y_r = self.convert_dtype([0, 1, 2, 2, 2, 2])
y = lfilter(b, a[0], x)
assert_array_almost_equal(y, y_r)
def test_zi_some_singleton_dims(self):
        # lfilter doesn't really broadcast (no prepending of 1's), but it
        # does do singleton expansion when x and zi have the same ndim. This
        # was broken only when a subset of the axes were singletons (gh-4681).
x = self.convert_dtype(np.zeros((3,2,5), 'l'))
b = self.convert_dtype(np.ones(5, 'l'))
a = self.convert_dtype(np.array([1,0,0]))
zi = np.ones((3,1,4), 'l')
zi[1,:,:] *= 2
zi[2,:,:] *= 3
zi = self.convert_dtype(zi)
zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l'))
y_expected = np.zeros((3,2,5), 'l')
y_expected[:,:,:4] = [[[1]], [[2]], [[3]]]
y_expected = self.convert_dtype(y_expected)
# IIR
y_iir, zf_iir = lfilter(b, a, x, -1, zi)
assert_array_almost_equal(y_iir, y_expected)
assert_array_almost_equal(zf_iir, zf_expected)
# FIR
y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
assert_array_almost_equal(y_fir, y_expected)
assert_array_almost_equal(zf_fir, zf_expected)
def base_bad_size_zi(self, b, a, x, axis, zi):
b = self.convert_dtype(b)
a = self.convert_dtype(a)
x = self.convert_dtype(x)
zi = self.convert_dtype(zi)
assert_raises(ValueError, lfilter, b, a, x, axis, zi)
def test_bad_size_zi(self):
# rank 1
x1 = np.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1])
self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]])
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1])
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]])
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2])
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3])
# rank 2
x2 = np.arange(12).reshape((4,3))
# for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)
self.base_bad_size_zi([1], [1], x2, 0, [0])
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]])
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]])
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]])
# for axis=1 zi.shape should == (4, max(len(a),len(b))-1)
self.base_bad_size_zi([1], [1], x2, 1, [0])
self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]])
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]])
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]])
def test_empty_zi(self):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,))
a = self.convert_dtype([1])
b = self.convert_dtype([1])
zi = self.convert_dtype([])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert_equal(zf.dtype, self.dtype)
assert_equal(zf.size, 0)
def test_lfiltic_bad_zi(self):
# Regression test for #3699: bad initial conditions
a = self.convert_dtype([1])
b = self.convert_dtype([1])
# "y" sets the datatype of zi, so it truncates if int
zi = lfiltic(b, a, [1., 0])
zi_1 = lfiltic(b, a, [1, 0])
zi_2 = lfiltic(b, a, [True, False])
assert_array_equal(zi, zi_1)
assert_array_equal(zi, zi_2)
def test_short_x_FIR(self):
# regression test for #5116
        # x shorter than b, with non-None zi, used to fail
a = self.convert_dtype([1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([7, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_short_x_IIR(self):
# regression test for #5116
        # x shorter than b, with non-None zi, used to fail
a = self.convert_dtype([1, 1])
b = self.convert_dtype([1, 0, -1])
zi = self.convert_dtype([2, 7])
x = self.convert_dtype([72])
ye = self.convert_dtype([74])
zfe = self.convert_dtype([-67, -72])
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, -1])
b0 = b.copy()
a = self.convert_dtype([0.5, -0.5])
a0 = a.copy()
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self):
x = self.generate((6,))
b = self.convert_dtype([1, 0, 1])
b0 = b.copy()
a = self.convert_dtype([2])
a0 = a.copy()
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.])
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
assert_equal(b, b0)
assert_equal(a, a0)
class TestLinearFilterFloat32(_TestLinearFilter):
dtype = np.dtype('f')
class TestLinearFilterFloat64(_TestLinearFilter):
dtype = np.dtype('d')
class TestLinearFilterFloatExtended(_TestLinearFilter):
dtype = np.dtype('g')
class TestLinearFilterComplex64(_TestLinearFilter):
dtype = np.dtype('F')
class TestLinearFilterComplex128(_TestLinearFilter):
dtype = np.dtype('D')
class TestLinearFilterComplexExtended(_TestLinearFilter):
dtype = np.dtype('G')
class TestLinearFilterDecimal(_TestLinearFilter):
dtype = np.dtype('O')
    def type(self, x):
        # Build from str() so that, e.g., 0.1 becomes Decimal('0.1') rather
        # than the full binary expansion of the float.
        return Decimal(str(x))
class TestLinearFilterObject(_TestLinearFilter):
dtype = np.dtype('O')
type = float
def test_lfilter_bad_object():
# lfilter: object arrays with non-numeric objects raise TypeError.
# Regression test for ticket #1452.
assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0])
assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0])
assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0])
with assert_raises(ValueError, match='common type'):
lfilter([1.], [1., 1.], ['a', 'b', 'c'])
def test_lfilter_notimplemented_input():
# Should not crash, gh-7991
assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5])
@pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short,
                                np.uint, int, np.ulonglong, np.longlong,
np.float32, np.float64, np.longdouble,
Decimal])
class TestCorrelateReal(object):
def _setup_rank1(self, dt):
a = np.linspace(0, 3, 4).astype(dt)
b = np.linspace(1, 2, 2).astype(dt)
y_r = np.array([0, 2, 5, 8, 3]).astype(dt)
return a, b, y_r
def equal_tolerance(self, res_dt):
        # Default 'decimal' precision used by assert_array_almost_equal.
decimal = 6
try:
dt_info = np.finfo(res_dt)
if hasattr(dt_info, 'resolution'):
decimal = int(-0.5*np.log10(dt_info.resolution))
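                # e.g. float64 has resolution 1e-15, giving decimal = 7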
except Exception:
pass
return decimal
def equal_tolerance_fft(self, res_dt):
# FFT implementations convert longdouble arguments down to
# double so don't expect better precision, see gh-9520
if res_dt == np.longdouble:
return self.equal_tolerance(np.double)
else:
return self.equal_tolerance(res_dt)
def test_method(self, dt):
if dt == Decimal:
method = choose_conv_method([Decimal(4)], [Decimal(3)])
assert_equal(method, 'direct')
else:
a, b, y_r = self._setup_rank3(dt)
y_fft = correlate(a, b, method='fft')
y_direct = correlate(a, b, method='direct')
assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype))
assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype))
assert_equal(y_fft.dtype, dt)
assert_equal(y_direct.dtype, dt)
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r[1:4])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[1:4][::-1])
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r[:-1])
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt)
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
def _setup_rank3(self, dt):
a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(
dt)
b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(
dt)
y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
[46., 432., 1062., 1840., 2672., 1698., 864., 266.],
[134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
[260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
[202., 664., 1290., 1984., 2688., 1590., 712., 150.],
[114., 344., 642., 960., 1280., 726., 296., 38.]],
[[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
[134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
[325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
[571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
[434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
[241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
[[22., 214., 528., 916., 1332., 846., 430., 132.],
[86., 484., 1098., 1832., 2600., 1602., 772., 206.],
[188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
[308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
[230., 692., 1290., 1928., 2568., 1458., 596., 78.],
[126., 354., 636., 924., 1212., 654., 234., 0.]]],
dtype=dt)
return a, b, y_r
def test_rank3_valid(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, "valid")
assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1])
assert_equal(y.dtype, dt)
def test_rank3_same(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b, "same")
assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
assert_equal(y.dtype, dt)
def test_rank3_all(self, dt):
a, b, y_r = self._setup_rank3(dt)
y = correlate(a, b)
assert_array_almost_equal(y, y_r)
assert_equal(y.dtype, dt)
class TestCorrelate(object):
# Tests that don't depend on dtype
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
        assert_raises(ValueError, correlate, a, b, mode='valid')
        assert_raises(ValueError, correlate, b, a, mode='valid')
def test_invalid_params(self):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, correlate, a, b, mode='spam')
assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
def test_mismatched_dims(self):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, correlate, [1], 2, method='direct')
assert_raises(ValueError, correlate, 1, [2], method='direct')
assert_raises(ValueError, correlate, [1], 2, method='fft')
assert_raises(ValueError, correlate, 1, [2], method='fft')
assert_raises(ValueError, correlate, [1], [[2]])
assert_raises(ValueError, correlate, [3], 2)
def test_numpy_fastpath(self):
a = [1, 2, 3]
b = [4, 5]
assert_allclose(correlate(a, b, mode='same'), [5, 14, 23])
a = [1, 2, 3]
b = [4, 5, 6]
assert_allclose(correlate(a, b, mode='same'), [17, 32, 23])
assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12])
assert_allclose(correlate(a, b, mode='valid'), [32])
@pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble])
class TestCorrelateComplex(object):
# The decimal precision to be used for comparing results.
# This value will be passed as the 'decimal' keyword argument of
# assert_array_almost_equal().
    # Since correlate may choose to use the FFT method, which converts
    # long doubles to doubles internally, don't expect better precision
    # for longdouble than for double (see gh-9520).
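    # For example, np.cdouble has precision 15, giving decimal = 10.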
def decimal(self, dt):
if dt == np.clongdouble:
dt = np.cdouble
return int(2 * np.finfo(dt).precision / 3)
def _setup_rank1(self, dt, mode):
np.random.seed(9)
a = np.random.randn(10).astype(dt)
a += 1j * np.random.randn(10).astype(dt)
b = np.random.randn(8).astype(dt)
b += 1j * np.random.randn(8).astype(dt)
y_r = (correlate(a.real, b.real, mode=mode) +
correlate(a.imag, b.imag, mode=mode)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
correlate(a.imag, b.real, mode=mode))
return a, b, y_r
def test_rank1_valid(self, dt):
a, b, y_r = self._setup_rank1(dt, 'valid')
y = correlate(a, b, 'valid')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
# See gh-5897
y = correlate(b, a, 'valid')
assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_same(self, dt):
a, b, y_r = self._setup_rank1(dt, 'same')
y = correlate(a, b, 'same')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_rank1_full(self, dt):
a, b, y_r = self._setup_rank1(dt, 'full')
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt))
assert_equal(y.dtype, dt)
def test_swap_full(self, dt):
d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt)
k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt)
y = correlate(d, k)
assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j])
def test_swap_same(self, dt):
d = [0.+0.j, 1.+1.j, 2.+2.j]
k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j]
y = correlate(d, k, mode="same")
assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j])
def test_rank3(self, dt):
a = np.random.randn(10, 8, 6).astype(dt)
a += 1j * np.random.randn(10, 8, 6).astype(dt)
b = np.random.randn(8, 6, 4).astype(dt)
b += 1j * np.random.randn(8, 6, 4).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
def test_rank0(self, dt):
a = np.array(np.random.randn()).astype(dt)
a += 1j * np.array(np.random.randn()).astype(dt)
b = np.array(np.random.randn()).astype(dt)
b += 1j * np.array(np.random.randn()).astype(dt)
y_r = (correlate(a.real, b.real)
+ correlate(a.imag, b.imag)).astype(dt)
y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))
y = correlate(a, b, 'full')
assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1)
assert_equal(y.dtype, dt)
assert_equal(correlate([1], [2j]), correlate(1, 2j))
assert_equal(correlate([2j], [3j]), correlate(2j, 3j))
assert_equal(correlate([3j], [4]), correlate(3j, 4))
class TestCorrelate2d(object):
def test_consistency_correlate_funcs(self):
# Compare np.correlate, signal.correlate, signal.correlate2d
a = np.arange(5)
b = np.array([3.2, 1.4, 3])
for mode in ['full', 'valid', 'same']:
assert_almost_equal(np.correlate(a, b, mode=mode),
signal.correlate(a, b, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([a], [b],
mode=mode)),
signal.correlate(a, b, mode=mode))
# See gh-5897
if mode == 'valid':
assert_almost_equal(np.correlate(b, a, mode=mode),
signal.correlate(b, a, mode=mode))
assert_almost_equal(np.squeeze(signal.correlate2d([b], [a],
mode=mode)),
signal.correlate(b, a, mode=mode))
def test_invalid_shapes(self):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
        assert_raises(ValueError, signal.correlate2d, a, b, mode='valid')
        assert_raises(ValueError, signal.correlate2d, b, a, mode='valid')
def test_complex_input(self):
assert_equal(signal.correlate2d([[1]], [[2j]]), -2j)
assert_equal(signal.correlate2d([[2j]], [[3j]]), 6)
assert_equal(signal.correlate2d([[3j]], [[4]]), 12j)
class TestLFilterZI(object):
def test_basic(self):
a = np.array([1.0, -1.0, 0.5])
b = np.array([1.0, 0.0, 2.0])
zi_expected = np.array([5.0, -1.0])
zi = lfilter_zi(b, a)
assert_array_almost_equal(zi, zi_expected)
def test_scale_invariance(self):
# Regression test. There was a bug in which b was not correctly
# rescaled when a[0] was nonzero.
b = np.array([2, 8, 5])
a = np.array([1, 1, 8])
zi1 = lfilter_zi(b, a)
zi2 = lfilter_zi(2*b, 2*a)
assert_allclose(zi2, zi1, rtol=1e-12)
class TestFiltFilt(object):
filtfilt_kind = 'tf'
def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None,
method='pad', irlen=None):
if self.filtfilt_kind == 'tf':
b, a = zpk2tf(*zpk)
return filtfilt(b, a, x, axis, padtype, padlen, method, irlen)
elif self.filtfilt_kind == 'sos':
sos = zpk2sos(*zpk)
return sosfiltfilt(sos, x, axis, padtype, padlen)
def test_basic(self):
zpk = tf2zpk([1, 2, 3], [1, 2, 3])
out = self.filtfilt(zpk, np.arange(12))
assert_allclose(out, arange(12), atol=5.28e-11)
def test_sine(self):
rate = 2000
t = np.linspace(0, 1.0, rate + 1)
# A signal with low frequency and a high frequency.
xlow = np.sin(5 * 2 * np.pi * t)
xhigh = np.sin(250 * 2 * np.pi * t)
x = xlow + xhigh
zpk = butter(8, 0.125, output='zpk')
# r is the magnitude of the largest pole.
r = np.abs(zpk[1]).max()
eps = 1e-5
# n estimates the number of steps for the
# transient to decay by a factor of eps.
n = int(np.ceil(np.log(eps) / np.log(r)))
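        # (e.g. r = 0.95 and eps = 1e-5 give n = ceil(224.5) = 225)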
# High order lowpass filter...
y = self.filtfilt(zpk, x, padlen=n)
# Result should be just xlow.
err = np.abs(y - xlow).max()
assert_(err < 1e-4)
# A 2D case.
x2d = np.vstack([xlow, xlow + xhigh])
y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1)
assert_equal(y2d.shape, x2d.shape)
err = np.abs(y2d - xlow).max()
assert_(err < 1e-4)
# Use the previous result to check the use of the axis keyword.
# (Regression test for ticket #1620)
y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0)
assert_equal(y2d, y2dt.T)
def test_axis(self):
# Test the 'axis' keyword on a 3D array.
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
zpk = butter(3, 0.125, output='zpk')
y0 = self.filtfilt(zpk, x, padlen=0, axis=0)
y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1)
assert_array_equal(y0, np.swapaxes(y1, 0, 1))
y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2)
assert_array_equal(y0, np.swapaxes(y2, 0, 2))
def test_acoeff(self):
if self.filtfilt_kind != 'tf':
return # only necessary for TF
# test for 'a' coefficient as single number
out = signal.filtfilt([.5, .5], 1, np.arange(10))
assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14)
def test_gust_simple(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The input array has length 2. The exact solution for this case
# was computed "by hand".
x = np.array([1.0, 2.0])
b = np.array([0.5])
a = np.array([1.0, -0.5])
y, z1, z2 = _filtfilt_gust(b, a, x)
assert_allclose([z1[0], z2[0]],
[0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]])
assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1],
0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]])
def test_gust_scalars(self):
if self.filtfilt_kind != 'tf':
pytest.skip('gust only implemented for TF systems')
# The filter coefficients are both scalars, so the filter simply
# multiplies its input by b/a. When it is used in filtfilt, the
# factor is (b/a)**2.
x = np.arange(12)
b = 3.0
a = 2.0
y = filtfilt(b, a, x, method="gust")
expected = (b/a)**2 * x
assert_allclose(y, expected)
class TestSOSFiltFilt(TestFiltFilt):
filtfilt_kind = 'sos'
def test_equivalence(self):
"""Test equivalence between sosfiltfilt and filtfilt"""
x = np.random.RandomState(0).randn(1000)
for order in range(1, 6):
zpk = signal.butter(order, 0.35, output='zpk')
b, a = zpk2tf(*zpk)
sos = zpk2sos(*zpk)
y = filtfilt(b, a, x)
y_sos = sosfiltfilt(sos, x)
assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order)
def filtfilt_gust_opt(b, a, x):
"""
An alternative implementation of filtfilt with Gustafsson edges.
This function computes the same result as
`scipy.signal.signaltools._filtfilt_gust`, but only 1-d arrays
are accepted. The problem is solved using `fmin` from `scipy.optimize`.
    `_filtfilt_gust` is significantly faster than this implementation.
"""
def filtfilt_gust_opt_func(ics, b, a, x):
"""Objective function used in filtfilt_gust_opt."""
m = max(len(a), len(b)) - 1
z0f = ics[:m]
z0b = ics[m:]
y_f = lfilter(b, a, x, zi=z0f)[0]
y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1]
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y_bf = lfilter(b, a, y_b, zi=z0f)[0]
value = np.sum((y_fb - y_bf)**2)
return value
m = max(len(a), len(b)) - 1
zi = lfilter_zi(b, a)
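    # Initial guess: scale the steady-state conditions from lfilter_zi by
    # the local mean of the signal at each end.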
ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi))
result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x),
xtol=1e-10, ftol=1e-12,
maxfun=10000, maxiter=10000,
full_output=True, disp=False)
opt, fopt, niter, funcalls, warnflag = result
if warnflag > 0:
raise RuntimeError("minimization failed in filtfilt_gust_opt: "
"warnflag=%d" % warnflag)
z0f = opt[:m]
z0b = opt[m:]
# Apply the forward-backward filter using the computed initial
# conditions.
y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1]
y = lfilter(b, a, y_b, zi=z0f)[0]
return y, z0f, z0b
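# Gustafsson's method chooses the forward and backward initial conditions so
# that filtering forward-then-backward agrees with backward-then-forward;
# filtfilt_gust_opt minimizes the squared difference between the two directly.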
def check_filtfilt_gust(b, a, shape, axis, irlen=None):
# Generate x, the data to be filtered.
np.random.seed(123)
x = np.random.randn(*shape)
# Apply filtfilt to x. This is the main calculation to be checked.
y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen)
# Also call the private function so we can test the ICs.
yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
# filtfilt_gust_opt is an independent implementation that gives the
# expected result, but it only handles 1-D arrays, so use some looping
# and reshaping shenanigans to create the expected output arrays.
xx = np.swapaxes(x, axis, -1)
out_shape = xx.shape[:-1]
yo = np.empty_like(xx)
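    # m is the filter order, i.e. the number of initial conditions per end.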
m = max(len(a), len(b)) - 1
zo1 = np.empty(out_shape + (m,))
zo2 = np.empty(out_shape + (m,))
for indx in product(*[range(d) for d in out_shape]):
yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx])
yo = np.swapaxes(yo, -1, axis)
zo1 = np.swapaxes(zo1, -1, axis)
zo2 = np.swapaxes(zo2, -1, axis)
assert_allclose(y, yo, rtol=1e-9, atol=1e-10)
assert_allclose(yg, yo, rtol=1e-9, atol=1e-10)
assert_allclose(zg1, zo1, rtol=1e-9, atol=1e-10)
assert_allclose(zg2, zo2, rtol=1e-9, atol=1e-10)
def test_choose_conv_method():
for mode in ['valid', 'same', 'full']:
for ndim in [1, 2]:
            # For inputs this small, direct convolution should be chosen.
            n, k, true_method = 8, 6, 'direct'
x = np.random.randn(*((n,) * ndim))
h = np.random.randn(*((k,) * ndim))
method = choose_conv_method(x, h, mode=mode)
assert_equal(method, true_method)
method_try, times = choose_conv_method(x, h, mode=mode, measure=True)
assert_(method_try in {'fft', 'direct'})
assert_(type(times) is dict)
assert_('fft' in times.keys() and 'direct' in times.keys())
n = 10
        # Extended-precision complex dtypes cannot go through the FFT path
        # without losing precision, so the direct method must be chosen.
        for not_fft_conv_supp in ["complex256", "complex192"]:
if hasattr(np, not_fft_conv_supp):
x = np.ones(n, dtype=not_fft_conv_supp)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
        # Integers this large would lose precision in an FFT, so the direct
        # method is expected.
        x = np.array([2**51], dtype=np.int64)
h = x.copy()
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
x = [Decimal(3), Decimal(2)]
h = [Decimal(1), Decimal(4)]
assert_equal(choose_conv_method(x, h, mode=mode), 'direct')
def test_filtfilt_gust():
# Design a filter.
z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk')
# Find the approximate impulse response length of the filter.
eps = 1e-10
r = np.max(np.abs(p))
approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
np.random.seed(123)
b, a = zpk2tf(z, p, k)
for irlen in [None, approx_impulse_len]:
signal_len = 5 * approx_impulse_len
# 1-d test case
check_filtfilt_gust(b, a, (signal_len,), 0, irlen)
# 3-d test case; test each axis.
for axis in range(3):
shape = [2, 2, 2]
shape[axis] = signal_len
check_filtfilt_gust(b, a, shape, axis, irlen)
# Test case with length less than 2*approx_impulse_len.
# In this case, `filtfilt_gust` should behave the same as if
# `irlen=None` was given.
length = 2*approx_impulse_len - 50
check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len)
class TestDecimate(object):
def test_bad_args(self):
x = np.arange(12)
assert_raises(TypeError, signal.decimate, x, q=0.5, n=1)
assert_raises(TypeError, signal.decimate, x, q=2, n=0.5)
def test_basic_IIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_basic_FIR(self):
x = np.arange(12)
y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round()
assert_array_equal(y, x[::2])
def test_shape(self):
# Regression test for ticket #1480.
z = np.zeros((30, 30))
d0 = signal.decimate(z, 2, axis=0, zero_phase=False)
assert_equal(d0.shape, (15, 30))
d1 = signal.decimate(z, 2, axis=1, zero_phase=False)
assert_equal(d1.shape, (30, 15))
def test_phaseshift_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=False)
def test_zero_phase_FIR(self):
with suppress_warnings() as sup:
sup.filter(BadCoefficients, "Badly conditioned filter")
self._test_phaseshift(method='fir', zero_phase=True)
def test_phaseshift_IIR(self):
self._test_phaseshift(method='iir', zero_phase=False)
def test_zero_phase_IIR(self):
self._test_phaseshift(method='iir', zero_phase=True)
def _test_phaseshift(self, method, zero_phase):
rate = 120
rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3
        t_tot = 100  # Need to let antialiasing filters settle
t = np.arange(rate*t_tot+1) / float(rate)
# Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts
freqs = np.array(rates_to) * 0.8 / 2
d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t)
* signal.windows.tukey(t.size, 0.1))
for rate_to in rates_to:
q = rate // rate_to
t_to = np.arange(rate_to*t_tot+1) / float(rate_to)
d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to)
* signal.windows.tukey(t_to.size, 0.1))
# Set up downsampling filters, match v0.17 defaults
if method == 'fir':
n = 30
system = signal.dlti(signal.firwin(n + 1, 1. / q,
window='hamming'), 1.)
elif method == 'iir':
n = 8
wc = 0.8*np.pi/q
system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi))
# Calculate expected phase response, as unit complex vector
if zero_phase is False:
_, h_resps = signal.freqz(system.num, system.den,
freqs/rate*2*np.pi)
h_resps /= np.abs(h_resps)
else:
h_resps = np.ones_like(freqs)
y_resamps = signal.decimate(d.real, q, n, ftype=system,
zero_phase=zero_phase)
# Get phase from complex inner product, like CSD
h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1)
h_resamps /= np.abs(h_resamps)
subnyq = freqs < 0.5*rate_to
# Complex vectors should be aligned, only compare below nyquist
assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0,
atol=1e-3, rtol=1e-3)
def test_auto_n(self):
# Test that our value of n is a reasonable choice (depends on
# the downsampling factor)
sfreq = 100.
n = 1000
t = np.arange(n) / sfreq
# will alias for decimations (>= 15)
x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t)
assert_allclose(np.linalg.norm(x), 1., rtol=1e-3)
x_out = signal.decimate(x, 30, ftype='fir')
assert_array_less(np.linalg.norm(x_out), 0.01)
class TestHilbert(object):
def test_bad_args(self):
x = np.array([1.0 + 0.0j])
assert_raises(ValueError, hilbert, x)
x = np.arange(8.0)
assert_raises(ValueError, hilbert, x, N=0)
def test_hilbert_theoretical(self):
# test cases by Ariel Rokem
decimal = 14
pi = np.pi
t = np.arange(0, 2 * pi, pi / 256)
a0 = np.sin(t)
a1 = np.cos(t)
a2 = np.sin(2 * t)
a3 = np.cos(2 * t)
a = np.vstack([a0, a1, a2, a3])
h = hilbert(a)
h_abs = np.abs(h)
h_angle = np.angle(h)
h_real = np.real(h)
# The real part should be equal to the original signals:
assert_almost_equal(h_real, a, decimal)
# The absolute value should be one everywhere, for this input:
assert_almost_equal(h_abs, np.ones(a.shape), decimal)
# For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
# the first 256 bins:
assert_almost_equal(h_angle[0, :256],
np.arange(-pi / 2, pi / 2, pi / 256),
decimal)
# For the 'slow' cosine - the phase should go from 0 to pi in the
# same interval:
assert_almost_equal(
h_angle[1, :256], np.arange(0, pi, pi / 256), decimal)
# The 'fast' sine should make this phase transition in half the time:
assert_almost_equal(h_angle[2, :128],
np.arange(-pi / 2, pi / 2, pi / 128),
decimal)
# Ditto for the 'fast' cosine:
assert_almost_equal(
h_angle[3, :128], np.arange(0, pi, pi / 128), decimal)
# The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia
assert_almost_equal(h[1].imag, a0, decimal)
def test_hilbert_axisN(self):
# tests for axis and N arguments
a = np.arange(18).reshape(3, 6)
# test axis
aa = hilbert(a, axis=-1)
assert_equal(hilbert(a.T, axis=0), aa.T)
# test 1d
assert_almost_equal(hilbert(a[0]), aa[0], 14)
# test N
aan = hilbert(a, N=20, axis=-1)
assert_equal(aan.shape, [3, 20])
assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3])
        # The next check is purely a regression test; the reference values
        # below are not independently verified.
a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j,
1.000000000000000e+00 - 2.047794505137069j,
1.999999999999999e+00 - 2.244055555687583j,
3.000000000000000e+00 - 1.262750302935009j,
4.000000000000000e+00 - 1.066489252384493j,
5.000000000000000e+00 + 2.918022706971047j,
8.881784197001253e-17 + 3.845658908989067j,
-9.444121133484362e-17 + 0.985044202202061j,
-1.776356839400251e-16 + 1.332257797702019j,
-3.996802888650564e-16 + 0.501905089898885j,
1.332267629550188e-16 + 0.668696078880782j,
-1.192678053963799e-16 + 0.235487067862679j,
-1.776356839400251e-16 + 0.286439612812121j,
3.108624468950438e-16 + 0.031676888064907j,
1.332267629550188e-16 - 0.019275656884536j,
-2.360035624836702e-16 - 0.1652588660287j,
0.000000000000000e+00 - 0.332049855010597j,
3.552713678800501e-16 - 0.403810179797771j,
8.881784197001253e-17 - 0.751023775297729j,
9.444121133484362e-17 - 0.79252210110103j])
assert_almost_equal(aan[0], a0hilb, 14, 'N regression')
class TestHilbert2(object):
def test_bad_args(self):
# x must be real.
x = np.array([[1.0 + 0.0j]])
assert_raises(ValueError, hilbert2, x)
# x must be rank 2.
x = np.arange(24).reshape(2, 3, 4)
assert_raises(ValueError, hilbert2, x)
# Bad value for N.
x = np.arange(16).reshape(4, 4)
assert_raises(ValueError, hilbert2, x, N=0)
assert_raises(ValueError, hilbert2, x, N=(2, 0))
assert_raises(ValueError, hilbert2, x, N=(2,))
class TestPartialFractionExpansion(object):
@staticmethod
def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7):
r_true = np.asarray(r_true)
p_true = np.asarray(p_true)
distance = np.hypot(abs(p[:, None] - p_true),
abs(r[:, None] - r_true))
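        # Match each computed (residue, pole) pair to the closest expected
        # pair, so the comparison is independent of ordering.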
rows, cols = linear_sum_assignment(distance)
assert_almost_equal(p[rows], p_true[cols], decimal=decimal)
assert_almost_equal(r[rows], r_true[cols], decimal=decimal)
def test_compute_factors(self):
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1])
assert_equal(len(factors), 3)
assert_almost_equal(factors[0], np.poly([2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
factors, poly = _compute_factors([1, 2, 3], [3, 2, 1],
include_powers=True)
assert_equal(len(factors), 6)
assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3]))
assert_almost_equal(factors[1], np.poly([1, 2, 2, 3]))
assert_almost_equal(factors[2], np.poly([2, 2, 3]))
assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3]))
assert_almost_equal(factors[4], np.poly([1, 1, 1, 3]))
assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2]))
assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3]))
def test_group_poles(self):
unique, multiplicity = _group_poles(
[1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min')
assert_equal(unique, [1.0, 2.0, 3.0])
assert_equal(multiplicity, [3, 2, 1])
def test_residue_general(self):
        # Tests are taken from issue #4464. Note that scipy returns poles
        # ordered by increasing absolute value, the opposite of MATLAB.
r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3])
assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4)
assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4)
assert_almost_equal(k, [-1.2500], decimal=4)
r, p, k = residue([-4, 8], [1, 6, 8])
assert_almost_equal(r, [8, -12])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residue([4, 1], [1, -1, -2])
assert_almost_equal(r, [1, 3])
assert_almost_equal(p, [-1, 2])
assert_equal(k.size, 0)
r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406])
self.assert_rp_almost_equal(
r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25],
[0.5 - 0.2j, 0.5 + 0.2j, 0.7])
assert_equal(k.size, 0)
r, p, k = residue([2, 1], [1, 5, 8, 4])
self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2])
assert_equal(k.size, 0)
r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348],
[1, -0.7, -0.14, 0.048])
assert_almost_equal(r, [-3, 4, 1])
assert_almost_equal(p, [0.2, -0.3, 0.8])
assert_almost_equal(k, [3, 1])
r, p, k = residue([1], [1, 2, -3])
assert_almost_equal(r, [0.25, -0.25])
assert_almost_equal(p, [1, -3])
assert_equal(k.size, 0)
r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1])
self.assert_rp_almost_equal(r, p,
[1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1])
assert_equal(k.size, 0)
r, p, k = residue([3, 8, 6], [1, 3, 3, 1])
self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1])
assert_equal(k.size, 0)
r, p, k = residue([3, -1], [1, -3, 2])
assert_almost_equal(r, [-2, 5])
assert_almost_equal(p, [1, 2])
assert_equal(k.size, 0)
r, p, k = residue([2, 3, -1], [1, -3, 2])
assert_almost_equal(r, [-4, 13])
assert_almost_equal(p, [1, 2])
assert_almost_equal(k, [2])
r, p, k = residue([7, 2, 3, -1], [1, -3, 2])
assert_almost_equal(r, [-11, 69])
assert_almost_equal(p, [1, 2])
assert_almost_equal(k, [7, 23])
r, p, k = residue([2, 3, -1], [1, -3, 4, -2])
self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j],
[1, 1 - 1j, 1 + 1j])
assert_almost_equal(k.size, 0)
def test_residue_leading_zeros(self):
# Leading zeros in numerator or denominator must not affect the answer.
r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3])
r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3])
r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3])
r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3])
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
    def test_residue_degenerate(self):
# Several tests for zero numerator and denominator.
r, p, k = residue([0, 0], [1, 6, 8])
assert_almost_equal(r, [0, 0])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residue(0, 1)
assert_equal(r.size, 0)
assert_equal(p.size, 0)
assert_equal(k.size, 0)
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residue(1, 0)
def test_residuez_general(self):
r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j])
self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j],
[1j, 1, 1])
assert_almost_equal(k, [2j])
r, p, k = residuez([1, 2, 1], [1, -1, 0.3561])
self.assert_rp_almost_equal(r, p,
[-0.9041 - 5.9928j, -0.9041 + 5.9928j],
[0.5 + 0.3257j, 0.5 - 0.3257j],
decimal=4)
assert_almost_equal(k, [2.8082], decimal=4)
r, p, k = residuez([1, -1], [1, -5, 6])
assert_almost_equal(r, [-1, 2])
assert_almost_equal(p, [2, 3])
assert_equal(k.size, 0)
r, p, k = residuez([2, 3, 4], [1, 3, 3, 1])
self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1])
assert_equal(k.size, 0)
r, p, k = residuez([1, -10, -4, 4], [2, -2, -4])
assert_almost_equal(r, [0.5, -1.5])
assert_almost_equal(p, [-1, 2])
assert_almost_equal(k, [1.5, -1])
r, p, k = residuez([18], [18, 3, -4, -1])
self.assert_rp_almost_equal(r, p,
[0.36, 0.24, 0.4], [0.5, -1/3, -1/3])
assert_equal(k.size, 0)
r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4]))
assert_almost_equal(r, [-10/3, 16/3])
assert_almost_equal(p, [-0.25, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez([1, -2, 1], [1, -1])
assert_almost_equal(r, [0])
assert_almost_equal(p, [1])
assert_almost_equal(k, [1, -1])
r, p, k = residuez(1, [1, -1j])
assert_almost_equal(r, [1])
assert_almost_equal(p, [1j])
assert_equal(k.size, 0)
r, p, k = residuez(1, [1, -1, 0.25])
assert_almost_equal(r, [0, 1])
assert_almost_equal(p, [0.5, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez(1, [1, -0.75, .125])
assert_almost_equal(r, [-1, 2])
assert_almost_equal(p, [0.25, 0.5])
assert_equal(k.size, 0)
r, p, k = residuez([1, 6, 2], [1, -2, 1])
assert_almost_equal(r, [-10, 9])
assert_almost_equal(p, [1, 1])
assert_almost_equal(k, [2])
r, p, k = residuez([6, 2], [1, -2, 1])
assert_almost_equal(r, [-2, 8])
assert_almost_equal(p, [1, 1])
assert_equal(k.size, 0)
r, p, k = residuez([1, 6, 6, 2], [1, -2, 1])
assert_almost_equal(r, [-24, 15])
assert_almost_equal(p, [1, 1])
assert_almost_equal(k, [10, 2])
r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1])
self.assert_rp_almost_equal(r, p,
[0.2618 + 0.1902j, 0.2618 - 0.1902j,
0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j],
[-0.8090 + 0.5878j, -0.8090 - 0.5878j,
1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j],
decimal=4)
assert_equal(k.size, 0)
def test_residuez_trailing_zeros(self):
# Trailing zeros in numerator or denominator must not affect the
# answer.
r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3])
r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3])
r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0])
r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0])
assert_almost_equal(r0, r1)
assert_almost_equal(r0, r2)
assert_almost_equal(r0, r3)
assert_almost_equal(p0, p1)
assert_almost_equal(p0, p2)
assert_almost_equal(p0, p3)
assert_almost_equal(k0, k1)
assert_almost_equal(k0, k2)
assert_almost_equal(k0, k3)
def test_residuez_degenerate(self):
r, p, k = residuez([0, 0], [1, 6, 8])
assert_almost_equal(r, [0, 0])
assert_almost_equal(p, [-2, -4])
assert_equal(k.size, 0)
r, p, k = residuez(0, 1)
assert_equal(r.size, 0)
assert_equal(p.size, 0)
assert_equal(k.size, 0)
with pytest.raises(ValueError, match="Denominator `a` is zero."):
residuez(1, 0)
with pytest.raises(ValueError,
match="First coefficient of determinant `a` must "
"be non-zero."):
residuez(1, [0, 1, 2, 3])
def test_inverse_unique_roots_different_rtypes(self):
# This test was inspired by github issue 2496.
r = [3 / 10, -1 / 6, -2 / 15]
p = [0, -2, -5]
k = []
b_expected = [0, 1, 3]
a_expected = [1, 7, 10, 0]
# With the default tolerance, the rtype does not matter
# for this example.
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
assert_allclose(b, b_expected)
assert_allclose(a, a_expected)
b, a = invresz(r, p, k, rtype=rtype)
assert_allclose(b, b_expected)
assert_allclose(a, a_expected)
def test_inverse_repeated_roots_different_rtypes(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
b_expected = [0, 0, 1, 3]
b_expected_z = [-1/6, -2/3, 11/6, 3]
a_expected = [1, 9, 24, 20, 0]
for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'):
b, a = invres(r, p, k, rtype=rtype)
assert_allclose(b, b_expected, atol=1e-14)
assert_allclose(a, a_expected)
b, a = invresz(r, p, k, rtype=rtype)
assert_allclose(b, b_expected_z, atol=1e-14)
assert_allclose(a, a_expected)
def test_inverse_bad_rtype(self):
r = [3 / 20, -7 / 36, -1 / 6, 2 / 45]
p = [0, -2, -2, -5]
k = []
with pytest.raises(ValueError, match="`rtype` must be one of"):
invres(r, p, k, rtype='median')
with pytest.raises(ValueError, match="`rtype` must be one of"):
invresz(r, p, k, rtype='median')
def test_invresz_one_coefficient_bug(self):
# Regression test for issue in gh-4646.
r = [1]
p = [2]
k = [0]
b, a = invresz(r, p, k)
assert_allclose(b, [1.0])
assert_allclose(a, [1.0, -2.0])
def test_invres(self):
b, a = invres([1], [1], [])
assert_almost_equal(b, [1])
assert_almost_equal(a, [1, -1])
b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10])
assert_almost_equal(a, [1, -3 - 1j, 4])
b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2],
[-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j,
-292 + 219j, 192 - 268j])
assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j])
b, a = invres([-1, 1j], [1, 1], [1, 2])
assert_almost_equal(b, [1, 0, -4, 3 + 1j])
assert_almost_equal(a, [1, -2, 1])
def test_invresz(self):
b, a = invresz([1], [1], [])
assert_almost_equal(b, [1])
assert_almost_equal(a, [1, -1])
b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], [])
assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j])
assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j])
b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3])
assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12])
assert_almost_equal(a, [1, -3 - 1j, 4])
b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2],
[-1, 2 - 1j, 2 - 1j, 3, 3, 3], [])
assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j,
-354 + 228j, 234 - 297j])
assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j,
108 - 54j, -81 + 108j])
b, a = invresz([-1, 1j], [1, 1], [1, 2])
assert_almost_equal(b, [1j, 1, -3, 2])
assert_almost_equal(a, [1, -2, 1])
def test_inverse_scalar_arguments(self):
b, a = invres(1, 1, 1)
assert_almost_equal(b, [1, 0])
assert_almost_equal(a, [1, -1])
b, a = invresz(1, 1, 1)
assert_almost_equal(b, [2, -1])
assert_almost_equal(a, [1, -1])
class TestVectorstrength(object):
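# Vector strength measures how tightly event times lock to a periodic
# signal: each event t maps to the unit phasor exp(2j*pi*t/period), the
# strength is the magnitude of the mean phasor (1 = perfect phase locking,
# 0 = phases cancel out), and the phase is the angle of that mean phasor.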
def test_single_1dperiod(self):
events = np.array([.5])
period = 5.
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_single_2dperiod(self):
events = np.array([.5])
period = [1, 2, 5.]
targ_strength = [1.] * 3
targ_phase = np.array([.5, .25, .1])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_array_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_1dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = 2
targ_strength = 1.
targ_phase = .125
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_equal_2dperiod(self):
events = np.array([.25, .25, .25, .25, .25, .25])
period = [1, 2, ]
targ_strength = [1.] * 2
targ_phase = np.array([.25, .125])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_1dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = 1
targ_strength = 1.
targ_phase = .1
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_spaced_2dperiod(self):
events = np.array([.1, 1.1, 2.1, 4.1, 10.1])
period = [1, .5]
targ_strength = [1.] * 2
targ_phase = np.array([.1, .2])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_1dperiod(self):
events = np.array([.25, .5, .75])
period = 1
targ_strength = 1. / 3.
targ_phase = .5
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_partial_2dperiod(self):
events = np.array([.25, .5, .75])
period = [1., 1., 1., 1.]
targ_strength = [1. / 3.] * 4
targ_phase = np.array([.5, .5, .5, .5])
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
assert_almost_equal(phase, 2 * np.pi * targ_phase)
def test_opposite_1dperiod(self):
events = np.array([0, .25, .5, .75])
period = 1.
targ_strength = 0
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 0)
assert_equal(phase.ndim, 0)
assert_almost_equal(strength, targ_strength)
def test_opposite_2dperiod(self):
events = np.array([0, .25, .5, .75])
period = [1.] * 10
targ_strength = [0.] * 10
strength, phase = vectorstrength(events, period)
assert_equal(strength.ndim, 1)
assert_equal(phase.ndim, 1)
assert_almost_equal(strength, targ_strength)
def test_2d_events_ValueError(self):
events = np.array([[1, 2]])
period = 1.
assert_raises(ValueError, vectorstrength, events, period)
def test_2d_period_ValueError(self):
events = 1.
period = np.array([[1]])
assert_raises(ValueError, vectorstrength, events, period)
def test_zero_period_ValueError(self):
events = 1.
period = 0
assert_raises(ValueError, vectorstrength, events, period)
def test_negative_period_ValueError(self):
events = 1.
period = -1
assert_raises(ValueError, vectorstrength, events, period)
def cast_tf2sos(b, a):
"""Convert TF2SOS, casting to complex128 and back to the original dtype."""
# tf2sos does not support all of the dtypes that we want to check, e.g.:
#
# TypeError: array type complex256 is unsupported in linalg
#
# so let's cast, convert, and cast back -- should be fine for the
# systems and precisions we are testing.
dtype = np.asarray(b).dtype
b = np.array(b, np.complex128)
a = np.array(a, np.complex128)
return tf2sos(b, a).astype(dtype)
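# For example (illustrative only, not part of the test suite): a longdouble
# system still comes back as longdouble sections, even though tf2sos would
# reject that dtype directly:
#
#     sos = cast_tf2sos(np.array([1., -1.], np.longdouble),
#                       np.array([0.5, -0.5], np.longdouble))
#     assert sos.dtype == np.longdouble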
def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0):
"""Wrap assert_allclose while casting object arrays."""
if actual.dtype.kind == 'O':
dtype = np.array(actual.flat[0]).dtype
actual, desired = actual.astype(dtype), desired.astype(dtype)
assert_allclose(actual, desired, rtol, atol)
@pytest.mark.parametrize('func', (sosfilt, lfilter))
def test_nonnumeric_dtypes(func):
x = [Decimal(1), Decimal(2), Decimal(3)]
b = [Decimal(1), Decimal(2), Decimal(3)]
a = [Decimal(1), Decimal(2), Decimal(3)]
x = np.array(x)
assert x.dtype.kind == 'O'
desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float))
if func is sosfilt:
actual = sosfilt([b + a], x)
else:
actual = lfilter(b, a, x)
assert all(isinstance(x, Decimal) for x in actual)
assert_allclose(actual.astype(float), desired.astype(float))
# Degenerate cases
if func is lfilter:
args = [1., 1.]
else:
args = [tf2sos(1., 1.)]
with pytest.raises(NotImplementedError,
match='input type .* not supported'):
func(*args, x=['foo'])
with pytest.raises(ValueError, match='must be at least 1-D'):
func(*args, x=1.)
@pytest.mark.parametrize('dt', 'fdgFDGO')
class TestSOSFilt(object):
# The test_rank* tests are pulled from _TestLinearFilter
def test_rank1(self, dt):
x = np.linspace(0, 5, 6).astype(dt)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, -0.5]).astype(dt)
# Test simple IIR
y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt)
sos = cast_tf2sos(b, a)
assert_array_almost_equal(sosfilt(sos, x), y_r)
# Test simple FIR
b = np.array([1, 1]).astype(dt)
# NOTE: This was changed (rel. to TestLinear...) to add a pole @zero:
a = np.array([1, 0]).astype(dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt)
assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r)
b = [1, 1, 0]
a = [1, 0, 0]
x = np.ones(8)
sos = np.concatenate((b, a))
sos.shape = (1, 6)
y = sosfilt(sos, x)
assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2])
def test_rank2(self, dt):
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(dt)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, 0.5]).astype(dt)
y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
dtype=dt)
y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], dtype=dt)
y = sosfilt(cast_tf2sos(b, a), x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
y = sosfilt(cast_tf2sos(b, a), x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank3(self, dt):
shape = (4, 3, 2)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
b = np.array([1, -1]).astype(dt)
a = np.array([0.5, 0.5]).astype(dt)
# Test last axis
y = sosfilt(cast_tf2sos(b, a), x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
def test_initial_conditions(self, dt):
b1, a1 = signal.butter(2, 0.25, 'low')
b2, a2 = signal.butter(2, 0.75, 'low')
b3, a3 = signal.butter(2, 0.75, 'low')
b = np.convolve(np.convolve(b1, b2), b3)
a = np.convolve(np.convolve(a1, a2), a3)
sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3]))
x = np.random.rand(50).astype(dt)
# Stopping filtering and continuing
y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6))
y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]]
assert_allclose_cast(y_true, lfilter(b, a, x))
y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2)))
y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]]
assert_allclose_cast(y_true, y_sos)
# Use a step function
zi = sosfilt_zi(sos)
x = np.ones(8, dt)
y, zf = sosfilt(sos, x, zi=zi)
assert_allclose_cast(y, np.ones(8))
assert_allclose_cast(zf, zi)
# Initial condition shape matching
x.shape = (1, 1) + x.shape # 3D
assert_raises(ValueError, sosfilt, sos, x, zi=zi)
zi_nd = zi.copy()
zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1])
assert_raises(ValueError, sosfilt, sos, x,
zi=zi_nd[:, :, :, [0, 1, 1]])
y, zf = sosfilt(sos, x, zi=zi_nd)
assert_allclose_cast(y[0, 0], np.ones(8))
assert_allclose_cast(zf[:, 0, 0, :], zi)
def test_initial_conditions_3d_axis1(self, dt):
# Test the use of zi when sosfilt is applied to axis 1 of a 3-d input.
# Input array is x.
x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3))
x = x.astype(dt)
# Design a filter in ZPK format and convert to SOS
zpk = signal.butter(6, 0.35, output='zpk')
sos = zpk2sos(*zpk)
nsections = sos.shape[0]
# Filter along this axis.
axis = 1
# Initial conditions, all zeros.
shp = list(x.shape)
shp[axis] = 2
shp = [nsections] + shp
z0 = np.zeros(shp)
# Apply the filter to x.
yf, zf = sosfilt(sos, x, axis=axis, zi=z0)
# Apply the filter to x in two stages.
y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0)
y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1)
# y should equal yf, and z2 should equal zf.
y = np.concatenate((y1, y2), axis=axis)
assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13)
assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13)
# let's try the "step" initial condition
zi = sosfilt_zi(sos)
zi.shape = [nsections, 1, 2, 1]
zi = zi * x[:, 0:1, :]
y = sosfilt(sos, x, axis=axis, zi=zi)[0]
# check it against the TF form
b, a = zpk2tf(*zpk)
zi = lfilter_zi(b, a)
zi.shape = [1, zi.size, 1]
zi = zi * x[:, 0:1, :]
y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0]
assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13)
def test_bad_zi_shape(self, dt):
# The shape of zi is checked before using any values in the
# arguments, so np.empty is fine for creating the arguments.
x = np.empty((3, 15, 3), dt)
sos = np.zeros((4, 6))
zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3)
with pytest.raises(ValueError, match='should be all ones'):
sosfilt(sos, x, zi=zi, axis=1)
sos[:, 3] = 1.
with pytest.raises(ValueError, match='Invalid zi shape'):
sosfilt(sos, x, zi=zi, axis=1)
def test_sosfilt_zi(self, dt):
sos = signal.butter(6, 0.2, output='sos')
zi = sosfilt_zi(sos)
y, zf = sosfilt(sos, np.ones(40, dt), zi=zi)
assert_allclose_cast(zf, zi, rtol=1e-13)
# Expected steady state value of the step response of this filter:
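# (Each biquad's DC gain is H(z=1) = sum(b) / sum(a); the steady-state
# step response of the cascade is the product of the per-section gains.)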
ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1))
assert_allclose_cast(y, ss, rtol=1e-13)
# zi as array-like
_, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist())
assert_allclose_cast(zf, zi, rtol=1e-13)
class TestDeconvolve(object):
def test_basic(self):
# From docstring example
original = [0, 1, 0, 0, 1, 1, 0, 0]
impulse_response = [2, 1]
recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0]
recovered, remainder = signal.deconvolve(recorded, impulse_response)
assert_allclose(recovered, original)
class TestDetrend(object):
def test_basic(self):
detrended = detrend(array([1, 2, 3]))
detrended_exact = array([0, 0, 0])
assert_array_almost_equal(detrended, detrended_exact)
def test_copy(self):
x = array([1, 1.2, 1.5, 1.6, 2.4])
copy_array = detrend(x, overwrite_data=False)
inplace = detrend(x, overwrite_data=True)
assert_array_almost_equal(copy_array, inplace)
class TestUniqueRoots(object):
def test_real_no_repeat(self):
p = [-1.0, -0.5, 0.3, 1.2, 10.0]
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
assert_equal(multiplicity, np.ones(len(p)))
def test_real_repeat(self):
p = [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05]
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(unique, [-0.975, -0.845, 0.5, 1.025], decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
def test_complex_no_repeat(self):
p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j]
unique, multiplicity = unique_roots(p)
assert_almost_equal(unique, p, decimal=15)
assert_equal(multiplicity, np.ones(len(p)))
def test_complex_repeat(self):
p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0,
0.5 + 0.5j, 0.45 + 0.55j]
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min')
assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max')
assert_almost_equal(unique,
[-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg')
assert_almost_equal(
unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j],
decimal=15)
assert_equal(multiplicity, [2, 2, 1, 2])
def test_gh_4915(self):
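# np.convolve(np.ones(5), np.ones(5)) gives the coefficients of
# (1 + x + x**2 + x**3 + x**4)**2, so the roots are the four non-trivial
# fifth roots of unity, each with multiplicity 2.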
p = np.roots(np.convolve(np.ones(5), np.ones(5)))
true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)]
unique, multiplicity = unique_roots(p)
unique = np.sort(unique)
assert_almost_equal(unique, true_roots, decimal=7)
assert_equal(multiplicity, [2, 2, 2, 2])
def test_complex_roots_extra(self):
unique, multiplicity = unique_roots([1.0, 1.0j, 1.0])
assert_almost_equal(unique, [1.0, 1.0j], decimal=15)
assert_equal(multiplicity, [2, 1])
unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1)
assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15)
assert_equal(multiplicity, [2, 1])
def test_single_unique_root(self):
p = np.random.rand(100) + 1j * np.random.rand(100)
unique, multiplicity = unique_roots(p, 2)
assert_almost_equal(unique, [np.min(p)], decimal=15)
assert_equal(multiplicity, [100])
|
ZeusbaseWeb/zeusbasePHP | refs/heads/master | frameworks/PHPWord-0.12.0/docs/conf.py | 33 | # -*- coding: utf-8 -*-
#
# PhpWord documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 23:09:26 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PhpWord'
copyright = u'2014, PHPWord Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.12.0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhpWorddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PhpWord.tex', u'PhpWord Documentation',
u'The PhpWord Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'PhpWord', u'PhpWord Documentation',
[u'The PhpWord Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PhpWord', u'PhpWord Documentation',
u'The PhpWord Team', 'PhpWord', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'PhpWord'
epub_author = u'The PhpWord Team'
epub_publisher = u'The PhpWord Team'
epub_copyright = copyright
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Highlight PHP without starting <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
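# With startinline=True, ``.. code-block:: php`` snippets highlight without
# an opening <?php tag, e.g. (illustrative):
#
# .. code-block:: php
#
# $phpWord = new \PhpOffice\PhpWord\PhpWord();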
|
stopstalk/stopstalk-deployment | refs/heads/master | private/scripts/todays_contest_logs.py | 1 | """
Copyright (c) 2015-2020 Raj Patel(raj454raj@gmail.com), StopStalk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
db = current.db
cltable = db.contest_logging
records = db(cltable).select()
# Construct mail message body
message = ""
for i in records:
message += "%s %s %s %s" % (i.click_type,
i.contest_details,
i.stopstalk_handle,
str(i.time_stamp))
message += "\n"
# Empty the sessions table
cltable.truncate()
# Send mail to the admin
current.send_mail(to="raj454raj@gmail.com",
subject="Today's contest logs",
message=message,
mail_type="admin",
bulk=True)
|
bugaevc/thefuck | refs/heads/master | tests/rules/test_python_execute.py | 17 | import pytest
from thefuck.rules.python_execute import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='python foo'),
Command(script='python bar')])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command('python foo'), 'python foo.py'),
(Command('python bar'), 'python bar.py')])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/pgen2/token.py | 353 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
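# For example, tok_name[NAME] == 'NAME' and tok_name[OP] == 'OP', which is
# handy when printing tokens for debugging.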
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
|
kundancool/android_kernel_xiaomi_msm8974 | refs/heads/cm-13.0 | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
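# (syscalls is an autodict, so the first lookup of a new id yields an empty
# nested dict; '+=' on it raises TypeError, which we catch to seed the
# count at 1.)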
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
alaski/nova | refs/heads/master | nova/conf/network.py | 1 | # needs:fix_opt_description
# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.conf import paths
NOVA_NET_API = 'nova.network.api.API'
network_opts = [
cfg.StrOpt("flat_network_bridge",
help="""
This option determines the bridge used for simple network interfaces when no
bridge is specified in the VM creation request.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any string representing a valid network bridge, such as 'br100'
Related options:
``use_neutron``
"""),
cfg.StrOpt("flat_network_dns",
default="8.8.4.4",
help="""
This is the address of the DNS server for a simple network. If this option is
not specified, the default of '8.8.4.4' is used.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("flat_injected",
default=False,
help="""
This option determines whether the network setup information is injected into
the VM before it is booted. While it was originally designed to be used only by
nova-network, it is also used by the vmware and xenapi virt drivers to control
whether network information is injected into a VM.
"""),
cfg.StrOpt("flat_interface",
help="""
This option is the name of the virtual interface of the VM on which the bridge
will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt for the bridge interface name.
Possible values:
Any valid virtual interface name, such as 'eth0'
"""),
cfg.IntOpt("vlan_start",
default=100,
min=1,
max=4094,
help="""
This is the VLAN number used for private networks. Note that when creating
the networks, if the specified number has already been assigned, nova-network
will increment this number until it finds an available VLAN.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any integer between 1 and 4094. Values outside of that range will raise a
ValueError exception. Default = 100.
Related options:
``network_manager``, ``use_neutron``
"""),
cfg.StrOpt("vlan_interface",
help="""
This option is the name of the virtual interface of the VM on which the VLAN
bridge will be built. While it was originally designed to be used only by
nova-network, it is also used by libvirt and xenapi for the bridge interface
name.
Please note that this setting will be ignored in nova-network if the
configuration option for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any valid virtual interface name, such as 'eth0'
"""),
cfg.IntOpt("num_networks",
default=1,
min=1,
help="""
This option represents the number of networks to create if not explicitly
specified when the network is created. The only time this is used is if a CIDR
is specified, but an explicit network_size is not. In that case, the subnets
are created by dividing the IP address space of the CIDR by num_networks. The
resulting subnet sizes cannot be larger than the configuration option
`network_size`; in that event, they are reduced to `network_size`, and a
warning is logged.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any positive integer is technically valid, although there are practical
limits based upon available IP address space and virtual interfaces. The
default is 1.
Related options:
``use_neutron``, ``network_size``
"""),
cfg.StrOpt("vpn_ip",
default="$my_ip",
help="""
This is the public IP address for the cloudpipe VPN servers. It defaults to the
IP address of the host.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any valid IP address. The default is $my_ip, the IP address of the host.
Related options:
``network_manager``, ``use_neutron``, ``vpn_start``
"""),
cfg.PortOpt("vpn_start",
default=1000,
help="""
This is the port number to use as the first VPN port for private networks.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment. It also will be ignored if the configuration option
for `network_manager` is not set to the default of
'nova.network.manager.VlanManager', or if you specify a value the 'vpn_start'
parameter when creating a network.
Possible values:
Any integer representing a valid port number. The default is 1000.
Related options:
``use_neutron``, ``vpn_ip``, ``network_manager``
"""),
cfg.IntOpt("network_size",
default=256,
min=1,
help="""
This option determines the number of addresses in each private subnet.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any positive integer that is less than or equal to the available network
size. Note that if you are creating multiple networks, they must all fit in
the available IP address space. The default is 256.
Related options:
``use_neutron``, ``num_networks``
"""),
cfg.StrOpt("fixed_range_v6",
default="fd00::/48",
help="""
This option determines the fixed IPv6 address block when creating a network.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IPv6 CIDR. The default value is "fd00::/48".
Related options:
``use_neutron``
"""),
cfg.StrOpt("gateway",
help="""
This is the default IPv4 gateway. It is used only in the testing suite.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``, ``gateway_v6``
"""),
cfg.StrOpt("gateway_v6",
help="""
This is the default IPv6 gateway. It is used only in the testing suite.
Please note that this option is only used when using nova-network instead of
Neutron in your deployment.
Possible values:
Any valid IP address.
Related options:
``use_neutron``, ``gateway``
"""),
cfg.IntOpt("cnt_vpn_clients",
default=0,
min=0,
help="""
This option represents the number of IP addresses to reserve at the top of the
address range for VPN clients. It also will be ignored if the configuration
option for `network_manager` is not set to the default of
'nova.network.manager.VlanManager'.
Possible values:
Any integer, 0 or greater. The default is 0.
Related options:
``use_neutron``, ``network_manager``
"""),
cfg.IntOpt("fixed_ip_disassociate_timeout",
default=600,
min=0,
help="""
This is the number of seconds to wait before disassociating a deallocated fixed
IP address. This is only used with the nova-network service, and has no effect
when using neutron for networking.
Possible values:
Any integer, zero or greater. The default is 600 (10 minutes).
Related options:
``use_neutron``
"""),
cfg.IntOpt("create_unique_mac_address_attempts",
default=5,
min=1,
help="""
This option determines how many times nova-network will attempt to create a
unique MAC address before giving up and raising a
`VirtualInterfaceMacAddressException` error.
Possible values:
Any positive integer. The default is 5.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("teardown_unused_network_gateway",
default=False,
help="""
Determines whether unused gateway devices, both VLAN and bridge, are deleted if
the network is in nova-network VLAN mode and is multi-hosted.
Related options:
``use_neutron``, ``vpn_ip``, ``fake_network``
"""),
cfg.BoolOpt("force_dhcp_release",
default=True,
help="""
When this option is True, a call is made to release the DHCP for the instance
when that instance is terminated.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("update_dns_entries",
default=False,
help="""
When this option is True, whenever a DNS entry must be updated, a fanout cast
message is sent to all network hosts to update their DNS entries in multi-host
mode.
Related options:
``use_neutron``
"""),
cfg.IntOpt("dns_update_periodic_interval",
default=-1,
min=-1,
help="""
This option determines the time, in seconds, to wait between refreshing DNS
entries for the network.
Possible values:
Either -1 (default), or any positive integer. A negative value will disable
the updates.
Related options:
``use_neutron``
"""),
cfg.StrOpt("dhcp_domain",
default="novalocal",
help="""
This option allows you to specify the domain for the DHCP server.
Possible values:
Any string that is a valid domain name.
Related options:
``use_neutron``
"""),
cfg.StrOpt("l3_lib",
default="nova.network.l3.LinuxNetL3",
help="""
This option allows you to specify the L3 management library to be used.
Possible values:
Any dot-separated string that represents the import path to an L3
networking library.
Related options:
``use_neutron``
"""),
cfg.BoolOpt("share_dhcp_address",
default=False,
deprecated_for_removal=True,
deprecated_since='2014.2',
help="""
THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK.
If True in multi_host mode, all compute hosts share the same dhcp address. The
same IP address used for DHCP will be added on each nova-network node which is
only visible to the VMs on the same host.
The use of this configuration has been deprecated and may be removed in any
release after Mitaka. It is recommended that instead of relying on this option,
an explicit value should be passed to 'create_networks()' as a keyword argument
with the name 'share_address'.
"""),
cfg.BoolOpt('use_neutron',
default=False,
help="Whether to use Neutron or Nova Network as the back end "
"for networking. Defaults to False (indicating Nova "
"network).Set to True to use neutron.")
]
linux_net_opts = [
cfg.MultiStrOpt('dhcpbridge_flagfile',
default=['/etc/nova/nova-dhcpbridge.conf'],
help="""
This option is a list of full paths to one or more configuration files for
dhcpbridge. In most cases the default path of '/etc/nova/nova-dhcpbridge.conf'
should be sufficient, but if you have special needs for configuring dhcpbridge,
you can change or add to this list.
Possible values:
A list of strings, where each string is the full path to a dhcpbridge
configuration file.
"""),
cfg.StrOpt('networks_path',
default=paths.state_path_def('networks'),
help="""
The location where the network configuration files will be kept. The default is
the 'networks' directory off of the location where nova's Python module is
installed.
Possible values:
A string containing the full path to the desired configuration directory
"""),
cfg.StrOpt('public_interface',
default='eth0',
help="""
This is the name of the network interface for public IP addresses. The default
is 'eth0'.
Possible values:
Any string representing a network interface name
"""),
cfg.StrOpt('dhcpbridge',
default=paths.bindir_def('nova-dhcpbridge'),
help="""
The location of the binary nova-dhcpbridge. By default it is the binary named
'nova-dhcpbridge' that is installed with all the other nova binaries.
Possible values:
Any string representing the full path to the binary for dhcpbridge
"""),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help="""
This is the public IP address of the network host. It is used when creating a
SNAT rule.
Possible values:
Any valid IP address
Related options:
force_snat_range
"""),
cfg.IntOpt('dhcp_lease_time',
default=86400,
min=1,
help="""
The lifetime of a DHCP lease, in seconds. The default is 86400 (one day).
Possible values:
Any positive integer value.
"""),
cfg.MultiStrOpt("dns_server",
default=[],
help="""
Despite the singular form of the name of this option, it is actually a list of
zero or more server addresses that dnsmasq will use for DNS nameservers. If
this is not empty, dnsmasq will not read /etc/resolv.conf, but will only use
the servers specified in this option. If the option use_network_dns_servers is
True, the dns1 and dns2 servers from the network will be appended to this list,
and will be used as DNS servers, too.
Possible values:
A list of strings, where each string is either an IP address or a FQDN.
Related options:
use_network_dns_servers
"""),
cfg.BoolOpt("use_network_dns_servers",
default=False,
help="""
When this option is set to True, the dns1 and dns2 servers for the network
specified by the user on boot will be used for DNS, as well as any specified in
the `dns_server` option.
Related options:
dns_server
"""),
cfg.ListOpt("dmz_cidr",
default=[],
help="""
This option is a list of zero or more IP address ranges in your network's DMZ
that should be accepted.
Possible values:
A list of strings, each of which should be a valid CIDR.
"""),
cfg.MultiStrOpt("force_snat_range",
default=[],
help="""
This is a list of zero or more IP ranges that traffic from the
`routing_source_ip` will be SNATted to. If the list is empty, then no SNAT
rules are created.
Possible values:
A list of strings, each of which should be a valid CIDR.
Related options:
routing_source_ip
"""),
cfg.StrOpt("dnsmasq_config_file",
default="",
help="""
The path to the custom dnsmasq configuration file, if any.
Possible values:
The full path to the configuration file, or an empty string if there is no
custom dnsmasq configuration file.
"""),
cfg.StrOpt("linuxnet_interface_driver",
default="nova.network.linux_net.LinuxBridgeInterfaceDriver",
help="""
This is the class used as the ethernet device driver for linuxnet bridge
operations. The default value should be all you need for most cases, but if you
wish to use a customized class, set this option to the full dot-separated
import path for that class.
Possible values:
Any string representing a dot-separated class path that Nova can import.
"""),
cfg.StrOpt("linuxnet_ovs_integration_bridge",
default="br-int",
help="""
The name of the Open vSwitch bridge that is used with linuxnet when connecting
with Open vSwitch.
Possible values:
Any string representing a valid bridge name.
"""),
cfg.BoolOpt("send_arp_for_ha",
default=False,
help="""
When True, arp messages will be sent when a device starts up and upon
binding floating IP addresses, to ensure that the arp caches on the compute
hosts are up-to-date.
Related options:
send_arp_for_ha_count
"""),
cfg.IntOpt("send_arp_for_ha_count",
default=3,
help="""
When arp messages are configured to be sent, they will be sent with the count
set to the value of this option. Of course, if this is set to zero, no arp
messages will be sent.
Possible values:
Any integer greater than or equal to 0
Related options:
send_arp_for_ha
"""),
cfg.BoolOpt("use_single_default_gateway",
default=False,
help="""
When set to True, only the first nic of a VM will get its default gateway from
the DHCP server.
"""),
cfg.MultiStrOpt("forward_bridge_interface",
default=["all"],
help="""
One or more interfaces that bridges can forward traffic to. If any of the items
in this list is the special keyword 'all', then all traffic will be forwarded.
Possible values:
A list of zero or more interface names, or the word 'all'.
"""),
cfg.StrOpt("metadata_host",
default="$my_ip",
help="""
This option determines the IP address for the network metadata API server.
Possible values:
* Any valid IP address. The default is the address of the Nova API server.
Related options:
* metadata_port
"""),
cfg.PortOpt("metadata_port",
default=8775,
help="""
This option determines the port used for the metadata API server.
Related options:
* metadata_host
"""),
cfg.StrOpt("iptables_top_regex",
default="",
help="""
This expression, if defined, will select any matching iptables rules and place
them at the top when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_bottom_regex
"""),
cfg.StrOpt("iptables_bottom_regex",
default="",
help="""
This expression, if defined, will select any matching iptables rules and place
them at the bottom when applying metadata changes to the rules.
Possible values:
* Any string representing a valid regular expression, or an empty string
Related options:
* iptables_top_regex
"""),
cfg.StrOpt("iptables_drop_action",
default="DROP",
help="""
By default, packets that do not pass the firewall are DROPped. In many cases,
though, an operator may find it more useful to change this from DROP to REJECT,
so that the user issuing those packets may have a better idea as to what's
going on, or LOGDROP in order to record the blocked traffic before DROPping.
Possible values:
* A string representing an iptables chain. The default is DROP.
"""),
cfg.IntOpt("ovs_vsctl_timeout",
default=120,
min=0,
help="""
This option represents the period of time, in seconds, that the ovs_vsctl calls
will wait for a response from the database before timing out. A setting of 0
means that the utility should wait forever for a response.
Possible values:
* Any positive integer if a limited timeout is desired, or zero if the
calls should wait forever for a response.
"""),
cfg.BoolOpt("fake_network",
default=False,
help="""
This option is used mainly in testing to avoid calls to the underlying network
utilities.
"""),
cfg.IntOpt("ebtables_exec_attempts",
default=3,
min=1,
help="""
This option determines the number of times to retry ebtables commands before
giving up. The minimum number of retries is 1.
Possible values:
* Any positive integer
Related options:
* ebtables_retry_interval
"""),
cfg.FloatOpt("ebtables_retry_interval",
default=1.0,
help="""
This option determines the time, in seconds, that the system will sleep in
between ebtables retries. Note that each successive retry waits a multiple of
this value, so for example, if this is set to the default of 1.0 seconds, and
ebtables_exec_attempts is 4, after the first failure, the system will sleep for
1 * 1.0 seconds, after the second failure it will sleep 2 * 1.0 seconds, and
after the third failure it will sleep 3 * 1.0 seconds.
Possible values:
* Any non-negative float or integer. Setting this to zero will result in no
waiting between attempts.
Related options:
* ebtables_exec_attempts
"""),
]
ldap_dns_opts = [
cfg.StrOpt('ldap_dns_url',
default='ldap://ldap.example.com:389',
help='URL for LDAP server which will store DNS entries'),
cfg.StrOpt('ldap_dns_user',
default='uid=admin,ou=people,dc=example,dc=org',
help='User for LDAP DNS'),
cfg.StrOpt('ldap_dns_password',
default='password',
help='Password for LDAP DNS',
secret=True),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='hostmaster@example.org',
help='Hostmaster for LDAP DNS driver Statement of Authority'),
cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help='DNS Servers for LDAP DNS driver'),
cfg.StrOpt('ldap_dns_base_dn',
default='ou=hosts,dc=example,dc=org',
help='Base DN for DNS entries in LDAP'),
cfg.StrOpt('ldap_dns_soa_refresh',
default='1800',
help='Refresh interval (in seconds) for LDAP DNS driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_retry',
default='3600',
help='Retry interval (in seconds) for LDAP DNS driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_expiry',
default='86400',
help='Expiry interval (in seconds) for LDAP DNS driver '
'Statement of Authority'),
cfg.StrOpt('ldap_dns_soa_minimum',
default='7200',
help='Minimum interval (in seconds) for LDAP DNS driver '
'Statement of Authority'),
]
driver_opts = [
cfg.StrOpt('network_driver',
default='nova.network.linux_net',
help='Driver to use for network creation'),
]
rpcapi_opts = [
cfg.StrOpt('network_topic',
default='network',
help='The topic network nodes listen on'),
cfg.BoolOpt('multi_host',
default=False,
help='Default value for multi_host in networks. Also, if set, '
'some rpc network calls will be sent directly to host.'),
]
ALL_DEFAULT_OPTS = (linux_net_opts + network_opts + ldap_dns_opts
+ rpcapi_opts + driver_opts)
def register_opts(conf):
conf.register_opts(linux_net_opts)
conf.register_opts(network_opts)
conf.register_opts(ldap_dns_opts)
conf.register_opts(driver_opts)
conf.register_opts(rpcapi_opts)
def list_opts():
return {"DEFAULT": ALL_DEFAULT_OPTS}
|
ryano144/intellij-community | refs/heads/master | python/testData/refactoring/extractsuperclass/moveExtendsCheckReference/source_module.after.py | 320 | from dest_module import NewParent
class MyClass(NewParent):
pass |
aptrishu/coala | refs/heads/master | coalib/settings/FunctionMetadata.py | 27 | from collections import OrderedDict
from copy import copy
from inspect import getfullargspec, ismethod
from coala_utils.decorators import enforce_signature
from coalib.settings.DocstringMetadata import DocstringMetadata
class FunctionMetadata:
str_nodesc = 'No description given.'
str_optional = "Optional, defaults to '{}'."
@enforce_signature
def __init__(self,
name: str,
desc: str='',
retval_desc: str='',
non_optional_params: (dict, None)=None,
optional_params: (dict, None)=None,
omit: (set, tuple, list, frozenset)=frozenset(),
deprecated_params: (set, tuple, list, frozenset)=frozenset()):
"""
Creates the FunctionMetadata object.
:param name: The name of the function.
:param desc: The description of the function.
:param retval_desc: The retval description of the function.
:param non_optional_params: A dict containing the name of non optional
parameters as the key and a tuple of a
description and the python annotation. To
preserve the order, use OrderedDict.
:param optional_params: A dict containing the name of optional
parameters as the key and a tuple
of a description, the python annotation and
the default value. To preserve the order,
use OrderedDict.
:param omit: A set of parameters to omit.
:param deprecated_params: A list of params that are deprecated.
"""
if non_optional_params is None:
non_optional_params = OrderedDict()
if optional_params is None:
optional_params = OrderedDict()
self.name = name
self._desc = desc
self.retval_desc = retval_desc
self._non_optional_params = non_optional_params
self._optional_params = optional_params
self.omit = set(omit)
self.deprecated_params = set(deprecated_params)
@property
def desc(self):
"""
Returns description of the function.
"""
return self._desc
@desc.setter
@enforce_signature
def desc(self, new_desc: str):
"""
Set's the description to the new_desc.
"""
self._desc = new_desc
def _filter_out_omitted(self, params):
"""
Filters out parameters that are to omit. This is a helper method for
the param related properties.
:param params: The parameter dictionary to filter.
:return: The filtered dictionary.
"""
return OrderedDict(filter(lambda p: p[0] not in self.omit,
tuple(params.items())))
@property
def non_optional_params(self):
"""
Retrieves a dict containing the name of non optional parameters as the
key and a tuple of a description and the python annotation. Values that
are present in self.omit will be omitted.
"""
return self._filter_out_omitted(self._non_optional_params)
@property
def optional_params(self):
"""
Retrieves a dict containing the name of optional parameters as the key
and a tuple of a description, the python annotation and the default
value. Values that are present in self.omit will be omitted.
"""
return self._filter_out_omitted(self._optional_params)
def add_deprecated_param(self, original, alias):
"""
Adds an alias for the original setting. The alias setting will have
the same metadata as the original one. If the original setting is not
optional, the alias will default to ``None``.
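For example (``run`` is a hypothetical function used for illustration):
>>> def run(timeout=10):
...     pass
>>> metadata = FunctionMetadata.from_function(run)
>>> metadata.add_deprecated_param('timeout', 'timeout_secs')
>>> 'timeout_secs' in metadata.deprecated_params
True
>>> metadata.optional_params['timeout_secs'][2]
10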
:param original: The name of the original setting.
:param alias: The name of the alias for the original.
:raises KeyError: If the new setting doesn't exist in the metadata.
"""
self.deprecated_params.add(alias)
self._optional_params[alias] = (
self._optional_params[original]
if original in self._optional_params
else self._non_optional_params[original] + (None, ))
def create_params_from_section(self, section):
"""
Create a params dictionary for this function that holds all values the
function needs plus optional ones that are available.
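For example, with a plain mapping standing in for a real ``Section``
(an illustrative sketch; values are converted via the annotations):
>>> def scale(image, factor: float=1.0):
...     pass
>>> metadata = FunctionMetadata.from_function(scale)
>>> params = metadata.create_params_from_section(
...     {'image': 'img.png', 'factor': '2.5'})
>>> params['factor']
2.5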
:param section: The section to retrieve the values from.
:return: The params dictionary.
"""
params = {}
for param in self.non_optional_params:
_, annotation = self.non_optional_params[param]
params[param] = self._get_param(param, section, annotation)
for param in self.optional_params:
if param in section:
_, annotation, _ = self.optional_params[param]
params[param] = self._get_param(param, section, annotation)
return params
@staticmethod
def _get_param(param, section, annotation):
def return_arg(x):
return x
if annotation is None:
annotation = return_arg
try:
return annotation(section[param])
except (TypeError, ValueError):
raise ValueError('Unable to convert parameter {!r} into type '
'{}.'.format(param, annotation))
@classmethod
def from_function(cls, func, omit=frozenset()):
"""
Creates a FunctionMetadata object from a function. Please note that any
variable argument lists are not supported. If you do not want the
first (usually named 'self') argument to appear, please pass the method of
an actual INSTANCE of a class; passing the method of the class isn't
enough. Alternatively you can add "self" to the omit set.
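For instance (an illustrative sketch):
>>> def multiply(a, b=2):
...     '''
...     Multiplies two numbers.
...     '''
...     return a * b
>>> metadata = FunctionMetadata.from_function(multiply)
>>> metadata.name
'multiply'
>>> list(metadata.non_optional_params)
['a']
>>> list(metadata.optional_params)
['b']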
:param func: The function. If __metadata__ of the unbound function is
present it will be copied and used, otherwise it will be
generated.
:param omit: A set of parameter names that are to be ignored.
:return: The FunctionMetadata object corresponding to the given
function.
"""
if hasattr(func, '__metadata__'):
metadata = copy(func.__metadata__)
metadata.omit = omit
return metadata
doc = func.__doc__ or ''
doc_comment = DocstringMetadata.from_docstring(doc)
non_optional_params = OrderedDict()
optional_params = OrderedDict()
argspec = getfullargspec(func)
args = () if argspec.args is None else argspec.args
defaults = () if argspec.defaults is None else argspec.defaults
num_non_defaults = len(args) - len(defaults)
for i, arg in enumerate(args):
# Implicit self argument or omitted explicitly
if i < 1 and ismethod(func):
continue
if i < num_non_defaults:
non_optional_params[arg] = (
doc_comment.param_dict.get(arg, cls.str_nodesc),
argspec.annotations.get(arg, None))
else:
optional_params[arg] = (
doc_comment.param_dict.get(arg, cls.str_nodesc) + ' (' +
cls.str_optional.format(
defaults[i-num_non_defaults]) + ')',
argspec.annotations.get(arg, None),
defaults[i-num_non_defaults])
return cls(name=func.__name__,
desc=doc_comment.desc,
retval_desc=doc_comment.retval_desc,
non_optional_params=non_optional_params,
optional_params=optional_params,
omit=omit)
def filter_parameters(self, dct):
"""
Filters the given dict for keys that are declared as parameters inside
this metadata (either optional or non-optional).
You can use this function to safely pass parameters from a given
dictionary:
>>> def multiply(a, b=2, c=0):
... return a * b + c
>>> metadata = FunctionMetadata.from_function(multiply)
>>> args = metadata.filter_parameters({'a': 10, 'b': 20, 'd': 30})
You can safely pass the arguments to the function now:
>>> multiply(**args) # 10 * 20
200
:param dct:
The dict to filter.
:return:
A new dict containing the filtered items.
"""
return {key: dct[key]
for key in (self.non_optional_params.keys() |
self.optional_params.keys())
if key in dct}
@classmethod
def merge(cls, *metadatas):
"""
Merges signatures of ``FunctionMetadata`` objects.
Parameter (either optional or non-optional) and non-parameter
descriptions are merged from left to right, meaning the right hand
metadata overrides the left hand one.
>>> def a(x, y):
... '''
... desc of *a*
... :param x: x of a
... :param y: y of a
... :return: 5*x*y
... '''
... return 5 * x * y
>>> def b(x):
... '''
... desc of *b*
... :param x: x of b
... :return: 100*x
... '''
... return 100 * x
>>> metadata1 = FunctionMetadata.from_function(a)
>>> metadata2 = FunctionMetadata.from_function(b)
>>> merged = FunctionMetadata.merge(metadata1, metadata2)
>>> merged.name
"<Merged signature of 'a', 'b'>"
>>> merged.desc
'desc of *b*'
>>> merged.retval_desc
'100*x'
>>> merged.non_optional_params['x'][0]
'x of b'
>>> merged.non_optional_params['y'][0]
'y of a'
:param metadatas:
The sequence of metadatas to merge.
:return:
A ``FunctionMetadata`` object containing the merged signature of
all given metadatas.
"""
# Materialize the metadatas into a tuple, as we iterate over them more
# than once and want to support arbitrary sequences.
metadatas = tuple(metadatas)
merged_name = ('<Merged signature of ' +
', '.join(repr(metadata.name)
for metadata in metadatas) +
'>')
merged_desc = next((m.desc for m in reversed(metadatas) if m.desc), '')
merged_retval_desc = next(
(m.retval_desc for m in reversed(metadatas) if m.retval_desc), '')
merged_non_optional_params = {}
merged_optional_params = {}
for metadata in metadatas:
# Use the fields and not the properties to get also omitted
# parameters.
merged_non_optional_params.update(metadata._non_optional_params)
merged_optional_params.update(metadata._optional_params)
merged_omit = set.union(*(metadata.omit for metadata in metadatas))
merged_deprecated_params = set.union(*(
metadata.deprecated_params for metadata in metadatas))
return cls(merged_name,
merged_desc,
merged_retval_desc,
merged_non_optional_params,
merged_optional_params,
merged_omit,
merged_deprecated_params)
|
vrenaville/ngo-addons-backport | refs/heads/master | addons/mail/wizard/__init__.py | 438 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import invite
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ramdesh/vesuvius_installer | refs/heads/master | tools/pfmon/pfparse.py | 10 | # pfparse.py
# Author: Owen Royall-Kahin (oroyallkahin@gmail.com)
# Reads off of a person finder feed, saves the data into a database,
# and sends out an email if any new items are added to the feed.
####### Imports #######
import sys, re, shutil, smtplib, urllib, datetime
import elementtree.ElementTree as ET
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
####### Constants #######
WORKING_DIR = './'
CONFIG = WORKING_DIR + "pfparse.conf"
CURRENT_FEED = WORKING_DIR + "current.xml"
FEED_HISTORY = WORKING_DIR + "history.txt"
LOG = WORKING_DIR + "pfparse.log"
COMMASPACE = ', '
ME = "owen.royall-kahin@nih.gov"
SMTP_SERVER = "mailfwd.nih.gov"
LANGUAGE = "en"
ns = '{http://www.w3.org/2005/Atom}'
gns = '{http://schemas.google.com/personfinder/2011}'
####### Globals #######
repo_url = "http://caveofdoom.dyndns.org/other.xml"
recipients = [] #["oroyallkahin@gmail.com", "owen.royall-kahin@nih.gov"]
####### Procedures #######
def load_conf(filename):
# modify globals with values in pfparse.conf
file = open(filename)
for line in file:
if line[0] == "#":
pass
else:
items = line.split("=")
if items[0] == "url":
global repo_url
repo_url = items[1]
elif items[0] == "storage":
db = items[1].strip() # parsed but currently unused
elif items[0] == "recipients":
global recipients
recipients = items[1].strip().split(",")
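# A hypothetical pfparse.conf showing the keys load_conf() understands
# (values are made up; lines starting with "#" are skipped):
#
# url=http://example.org/feed.xml
# storage=feed.db
# recipients=alice@example.org,bob@example.org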
# Timestamp log as event occurs, takes an optional message
def log(message="None"):
# Initialize list to contain file lines, contains latest entry
log_list = [str(datetime.datetime.now()) + " -Message: " + str(message) + "\n"]
try:
# Attempt to open file
log_file = open(LOG, "r")
# Dump file contents into a list
for line in list(log_file):
log_list.append(line)
except IOError as e:
if e.errno == 2:
pass
else:
raise Exception("Error accessing log file for reading. Reason: ", e)
try:
# Update the log file with the new contents
log_file = open(LOG, "w")
log_file.writelines(log_list)
log_file.close()
except Exception as e:
raise Exception("Error accessing log file for writing. Reason: ", e)
def add_to_history():
# Rotate feed file
history_list = list(open(CURRENT_FEED))
try:
# Attempt to open history file
history_file = open(FEED_HISTORY, "r")
for line in history_file:
history_list.append(line)
except IOError as e:
if e.errno == 2:
pass
else:
raise Exception("Error accessing history feed for reading. Reason: ", e)
try:
# Update the feed history file.
history_file = open(FEED_HISTORY, "w")
history_file.writelines(history_list)
history_file.close()
except Exception as e:
raise Exception("Error accessing history feed for writing. Reason: ", e)
# Return a set of English titles and identifiers from a feed, in the form (title, key name)
# Input feeds must be in the form of an ElementTree
def get_titles(feed):
eset = set([])
# These are not needed if they are not expected to change.
# Both the Google Schema and Atom namespace are listed at the top.
# Get Atom XML tag
#ns = re.match("{.*}", feed.getroot().tag).group()
# Get the Google Schema tag
#gns = re.match("{.*}", repo.tag).group()
for element in list(feed.getroot()):
for title in element.findall(ns+'content/'+gns+'repo/'+gns+'title'):
titlestr = title.items()
if titlestr[0][1] == LANGUAGE:
s = (title.text, \
element.find(ns+'id').text.split('http://google.org/personfinder/')[1])
eset.add(s)
break
# Returns this set.
# Note that this will ignore events without posted <LANGUAGE> titles
return eset
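# For reference, a minimal sketch of the feed shape get_titles() walks
# (namespaces as defined above; repo key and title are made up, and the
# language attribute is assumed to be the title element's first attribute):
#
# <feed xmlns="http://www.w3.org/2005/Atom">
#   <entry>
#     <id>http://google.org/personfinder/example-repo</id>
#     <content>
#       <repo xmlns="http://schemas.google.com/personfinder/2011">
#         <title xml:lang="en">Example Event</title>
#       </repo>
#     </content>
#   </entry>
# </feed>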
# Takes in lists of (title, key name) tuples for the new and removed
# entries, and emails a change notification to the configured recipients.
def send_email(new_entries = ["Blank"], removed_entries = ['removed']):
msg = MIMEMultipart('alternative')
msg['To'] = COMMASPACE.join(recipients)
msg['From'] = ME
msg['Subject'] = "Email Notification of PF Change"
msg.preamble = "preamble - pf has been changed"
msg.epilogue = "epilogue"
plain = "This is a test message."
html = """\
<html>
<head></head>
<body>
<p>Hello,
<br>This is a test message that indicates the PF feed has been changed.
<br>The events below are in the form (title, key name)
<br>New Feeds:<br>
""" + \
COMMASPACE.join("Title: "+entry[0] + \
", Key: "+entry[1] for entry in list(new_entries)) + \
"""
<br>Removed Feeds:<br>
""" + \
COMMASPACE.join("Title: "+entry[0] + \
", Key: "+entry[1] for entry in list(removed_entries)) + \
"""
</p>
</body>
</html>
"""
# for each item in the entry's getiterator() print tag and text
msg.attach(MIMEText(plain, 'plain'))
msg.attach(MIMEText(html, 'html'))
s = smtplib.SMTP(SMTP_SERVER)
s.sendmail(ME, recipients, msg.as_string())
s.close()
def main():
# Runs main program loop.
try:
# Load config settings
load_conf(CONFIG)
# get feed
new_file_path = urllib.urlretrieve(repo_url)[0]
new = ET.parse(open(new_file_path))
old = ET.parse(CURRENT_FEED)
new_entries = list(get_titles(new) - get_titles(old))
removed_entries = list(get_titles(old) - get_titles(new))
# Check for differences
if (new_entries or removed_entries):
# New content detected;
# Log the event
log(new_entries)
#Rotate history files
add_to_history()
# Update 'current' definitions, currently disabled for testing purposes
#shutil.move(new_file_path, CURRENT_FEED)
# Notifier options
# Send email
send_email(new_entries, removed_entries)
# possibly update PL for a new event
return 0
return 1
except Exception as e: # Should also except url errors, expat errors
print "An error has occurred: ", e
return 1
return 0
if __name__ == '__main__':
exit(main())
|
malishevg/edugraph | refs/heads/master | cms/djangoapps/contentstore/features/signup.py | 46 | # pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
@step('I fill in the registration form$')
def i_fill_in_the_registration_form(step):
def fill_in_reg_form():
register_form = world.css_find('form#register_form')
register_form.find_by_name('email').fill('robot+studio@edx.org')
register_form.find_by_name('password').fill('test')
register_form.find_by_name('username').fill('robot-studio')
register_form.find_by_name('name').fill('Robot Studio')
register_form.find_by_name('terms_of_service').click()
world.retry_on_exception(fill_in_reg_form)
@step('I press the Create My Account button on the registration form$')
def i_press_the_button_on_the_registration_form(step):
submit_css = 'form#register_form button#submit'
world.css_click(submit_css)
@step('I should see an email verification prompt')
def i_should_see_an_email_verification_prompt(step):
world.css_has_text('h1.page-header', u'My Courses')
world.css_has_text('div.msg h3.title', u'We need to verify your email address')
@step(u'I fill in and submit the signin form$')
def i_fill_in_the_signin_form(step):
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill('robot+studio@edx.org')
login_form.find_by_name('password').fill('test')
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
|
unioslo/cerebrum | refs/heads/master | Cerebrum/modules/no/uit/Constants.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2002-2019 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Cerebrum Constants for UiT.
"""
from Cerebrum import Constants
from Cerebrum.Constants import (
_AuthoritativeSystemCode,
_OUPerspectiveCode,
_SpreadCode,
_QuarantineCode,
_EntityExternalIdCode,
_PersonAffiliationCode,
_PersonAffStatusCode,
_AccountCode,
_ContactInfoCode,
_AddressCode,
_AuthenticationCode,
)
from Cerebrum.modules.PosixConstants import _PosixShellCode
from Cerebrum.modules.EmailConstants import _EmailServerTypeCode
from Cerebrum.modules.EntityTrait import _EntityTraitCode
class Constants(Constants.Constants):
#
# External Identifiers
#
externalid_sys_x_id = _EntityExternalIdCode(
'SYS_X_ID',
Constants.Constants.entity_person,
'Internal sys_x identifier')
externalid_paga_ansattnr = _EntityExternalIdCode(
'PAGA_ANSATTNR',
Constants.Constants.entity_person,
'Internal PAGA identifier')
externalid_hifm_ansattnr = _EntityExternalIdCode(
'HIFM_ANSATTNR',
Constants.Constants.entity_person,
'Internal HIFM identifier')
externalid_sito_ansattnr = _EntityExternalIdCode(
'SITO_ANSATTNR',
Constants.Constants.entity_person,
'Internal SITO identifier')
externalid_sito_ou = _EntityExternalIdCode(
'SITO_OU',
Constants.Constants.entity_ou,
'internal sito ou identifier')
#
# Authoritative systems
#
system_hifm = _AuthoritativeSystemCode(
'HIFM',
'Høgskolen i Alta')
system_hitos = _AuthoritativeSystemCode(
'HITOS',
'Høgskolen i Tromsø')
system_x = _AuthoritativeSystemCode(
'SYS_X',
'Manuelt personal system')
system_tlf = _AuthoritativeSystemCode(
'TLF',
'Telefoni system')
system_sysacc = _AuthoritativeSystemCode(
'SYSACC',
'System Accounts')
system_paga = _AuthoritativeSystemCode(
'PAGA',
'PAGA')
system_sito = _AuthoritativeSystemCode(
'SITO',
'SITO')
system_flyt = _AuthoritativeSystemCode(
'FLYT',
'FLYT')
system_fs_derived = _AuthoritativeSystemCode(
'FS-auto',
'Utledet av FS data')
system_kr_reg = _AuthoritativeSystemCode(
'KR_REG',
'Kontakt- og reservasjonsregisteret')
system_intern_ice = _AuthoritativeSystemCode(
'INTERN_ICE',
'Internal (uit) source for ICE number')
#
# Account codes
#
account_test = _AccountCode('T', 'Testkonto')
account_felles_drift = _AccountCode('FD', 'Felles Drift')
account_felles_intern = _AccountCode('FI', 'Felles Intern')
account_kurs = _AccountCode('K', 'Kurs')
account_forening = _AccountCode('F', 'Forening')
account_maskin = _AccountCode('M', 'Maskin')
account_prosess = _AccountCode('P', 'Prosess')
account_uit_guest = _AccountCode('gjestebruker_uit', 'Manuell gjestekonto')
#
# Contact codes
#
contact_workphone2 = _ContactInfoCode(
'PHONE_WORK_2',
'Secondary Work Phone')
contact_room = _ContactInfoCode(
'ROOM@UIT',
'Location and room number')
contact_building = _ContactInfoCode(
'BYGG@UIT',
'Building name')
contact_sito_mobile = _ContactInfoCode(
'PHONE_SITO',
'sito employee phone')
contact_uit_mobile = _ContactInfoCode(
'PHONE_UIT',
'uit employee phone')
contact_ice_phone = _ContactInfoCode(
'ICE_PHONE',
'Phone number for alerts (varsler)')
#
# Address codes
#
address_location = _AddressCode('Lokasjon', 'Campus')
#
# OU Structure perspective
#
perspective_sito = _OUPerspectiveCode('SITO', 'SITO')
#
# Affiliations
#
# Employee affiliations
affiliation_ansatt_sito = _PersonAffiliationCode(
'SITO',
'Ansatt ved studentsamskipnaden i tromso')
affiliation_ansatt = _PersonAffiliationCode(
'ANSATT',
'Ansatt ved UiT (i følge LT)')
affiliation_flyt_ansatt_hih = _PersonAffiliationCode(
'ANSATT_HIH',
'Ansatt ved HiH')
affiliation_flyt_student_hih = _PersonAffiliationCode(
'STUDENT_HIH',
'Student ved HiH')
# Affiliation status
affiliation_status_flyt_hih_ansatt_faculty = _PersonAffStatusCode(
affiliation_ansatt,
'Ansatt HiH',
'Vitenskapelig')
affiliation_status_flyt_hih_ansatt_tekadm = _PersonAffStatusCode(
affiliation_ansatt,
'ansatt HiH',
'Teknisk/administrativt')
affiliation_status_flyt_hin_ansatt_faculty = _PersonAffStatusCode(
affiliation_ansatt,
'Ansatt HiN',
'Vitenskapelig')
affiliation_status_flyt_hin_ansatt_tekadm = _PersonAffStatusCode(
affiliation_ansatt,
'ansatt HiN',
'Teknisk/administrativt')
affiliation_status_timelonnet_fast = _PersonAffStatusCode(
affiliation_ansatt,
'Timelonnet fast',
'Fast ansatt på timelønn')
affiliation_status_timelonnet_midlertidig = _PersonAffStatusCode(
affiliation_ansatt,
'Timelonnet midl',
'Midlertidig ansatt på timelønn')
affiliation_status_ansatt_perm = _PersonAffStatusCode(
affiliation_ansatt,
'permisjon',
'Ansatt, for tiden i permisjon')
affiliation_status_flyt_ansatt_hifm = _PersonAffStatusCode(
affiliation_ansatt,
'ansatt HIFm',
'Ansatte fra Høyskolen i Alta')
affiliation_status_ansatt_sito = _PersonAffStatusCode(
affiliation_ansatt_sito,
'sito',
'Ansatt')
affiliation_status_ansatt_sys_x = _PersonAffStatusCode(
affiliation_ansatt,
'sys_x-ansatt',
'Manuelt gitt tilgang til AD (bør nyanseres)')
# Student affiliations
affiliation_student = _PersonAffiliationCode(
'STUDENT',
'Student ved UiT (i følge FS)')
affiliation_status_flyt_hih_student_aktiv = _PersonAffStatusCode(
affiliation_student,
'student HiH',
'Aktiv student')
affiliation_status_flyt_student_hifm = _PersonAffStatusCode(
affiliation_student,
'student HIFm',
'Student fra Høyskolen i Alta')
affiliation_status_flyt_hin_student_aktiv = _PersonAffStatusCode(
affiliation_student,
'student HiN',
'Aktiv student')
affiliation_status_student_soker = _PersonAffStatusCode(
affiliation_student,
'soker',
'Registrert med søknad i FS')
affiliation_status_student_sys_x = _PersonAffStatusCode(
affiliation_student,
'sys_x-student',
'Student Manuelt gitt tilgang til AD')
affiliation_status_student_tilbud = _PersonAffStatusCode(
affiliation_student,
'tilbud',
'Har fått tilbud om opptak')
affiliation_status_student_opptak = _PersonAffStatusCode(
affiliation_student,
'opptak',
'Har studierett ved studieprogram')
affiliation_status_student_ny = Constants._PersonAffStatusCode(
affiliation_student,
'ny',
'Registrert med ny, gyldig studierett i FS')
affiliation_status_student_perm = _PersonAffStatusCode(
affiliation_student,
'permisjon',
'Har gyldig permisjonstatus i FS')
affiliation_status_student_alumni = _PersonAffStatusCode(
affiliation_student,
'alumni',
'Har fullført studieprogram i FS')
affiliation_status_student_drgrad = _PersonAffStatusCode(
affiliation_student,
'drgrad',
'Registrert student på doktorgrad')
affiliation_status_student_emnestud = _PersonAffStatusCode(
affiliation_student,
'emnestud',
'Registrert som aktiv emnestudent i FS')
# Tilknyttet affiliation
affiliation_tilknyttet = _PersonAffiliationCode(
'TILKNYTTET',
'Tilknyttet UiT uten å være student eller ansatt')
affiliation_tilknyttet_fagperson = _PersonAffStatusCode(
affiliation_tilknyttet,
'fagperson',
'Registrert som fagperson i FS')
affiliation_tilknyttet_emeritus = _PersonAffStatusCode(
affiliation_tilknyttet,
'emeritus',
'Registrert i LT med gjestetypekode EMERITUS')
affiliation_tilknyttet_ekst_stip = _PersonAffStatusCode(
affiliation_tilknyttet,
'ekst_stip',
'Personer registrert i LT med gjestetypekode=EF-STIP')
# Manual affiliation
affiliation_manuell = _PersonAffiliationCode(
'MANUELL',
'Tilknyttet enheter/instutusjoner som UiT har avtale med')
affiliation_manuell_alumni = _PersonAffStatusCode(
affiliation_manuell,
'alumni',
'Uteksaminerte studenter')
affiliation_manuell_sito = _PersonAffStatusCode(
affiliation_manuell,
'sito',
'Manuelt registrert Sito ansatt')
affiliation_manuell_gjest_u_konto = _PersonAffStatusCode(
affiliation_manuell,
'gjest_u_konto',
'gjest uten konto')
affiliation_manuell_unn = _PersonAffStatusCode(
affiliation_manuell,
'UNN',
'Universitetssykehuset i Nord-Norge')
affiliation_manuell_gjest = _PersonAffStatusCode(
affiliation_manuell,
'gjest',
'Gjest')
affiliation_manuell_utdanning_no = _PersonAffStatusCode(
affiliation_manuell,
'utdanning_no',
'Utdanning.no')
affiliation_manuell_akademisk_kvarter = _PersonAffStatusCode(
affiliation_manuell,
'akademisk_kvart',
'Akademisk Kvarter')
affiliation_manuell_norges_universitetet = _PersonAffStatusCode(
affiliation_manuell,
'norges_universi',
'Norgesuniversitetet')
affiliation_manuell_kirkutdnor = _PersonAffStatusCode(
affiliation_manuell,
'kirkutdnor',
'Kirkelig Utdanningssenter Nord-Norge')
affiliation_manuell_gjesteforsker = _PersonAffStatusCode(
affiliation_manuell,
'gjesteforsker',
'Gjesteforsker (under utfasing)')
affiliation_manuell_konsulent = _PersonAffStatusCode(
affiliation_manuell,
'konsulent',
'Konsulent (under utfasing)')
affiliation_status_gjest_u_account = _PersonAffStatusCode(
affiliation_manuell,
'gjest_u_konto',
'Gjest uten konto')
# Non-personal affiliation
affiliation_upersonlig = _PersonAffiliationCode(
'UPERSONLIG',
'Fellesbrukere, samt andre brukere uten eier')
affiliation_upersonlig_felles = _PersonAffStatusCode(
affiliation_upersonlig,
'felles',
'Felleskonti')
affiliation_upersonlig_kurs = _PersonAffStatusCode(
affiliation_upersonlig,
'kurs',
'Kurskonti')
affiliation_upersonlig_pvare = _PersonAffStatusCode(
affiliation_upersonlig,
'pvare',
'Programvarekonti')
affiliation_upersonlig_term_maskin = _PersonAffStatusCode(
affiliation_upersonlig,
'term_maskin',
'Terminalstuemaskin')
affiliation_upersonlig_bib_felles = _PersonAffStatusCode(
affiliation_upersonlig,
'bib_felles',
'Bibliotek felles')
#
# Shells
#
# Override of the default shell paths.
# NOTE: this file should be before PosixUser in cereconf.CLASS_CONSTANTS
# TODO: Shell path mapping should be configuration, not db-constants.
#
posix_shell_bash = _PosixShellCode(
'bash',
'/bin/bash')
posix_shell_csh = _PosixShellCode(
'csh',
'/bin/csh')
posix_shell_false = _PosixShellCode(
'false',
'/bin/false')
posix_shell_nologin = _PosixShellCode(
'nologin',
'/local/etc/nologin')
posix_shell_sh = _PosixShellCode(
'sh',
'/bin/sh')
posix_shell_zsh = _PosixShellCode(
'zsh',
'/local/bin/zsh')
#
# Spreads
#
spread_uit_fronter = _SpreadCode(
'fronter@uit',
Constants.Constants.entity_group,
'fronter user')
spread_uit_fronter_account = _SpreadCode(
'fronter_acc@uit',
Constants.Constants.entity_account,
'fronter account')
spread_uit_evu = _SpreadCode(
'evu@uit',
Constants.Constants.entity_account,
'evu person')
spread_uit_cristin = _SpreadCode(
'cristin@uit',
Constants.Constants.entity_account,
'Accounts with CRISTIN spread')
# spread for ldap guests
spread_uit_ldap_guest = _SpreadCode(
'guest@ldap',
Constants.Constants.entity_account,
'LDAP/RADIUS spread for wireless accounts')
# spread for ldap system accounts
spread_uit_ldap_system = _SpreadCode(
'system@ldap',
Constants.Constants.entity_account,
'account included in system tree on ldap')
# spread for ldap people accounts
spread_uit_ldap_people = _SpreadCode(
'people@ldap',
Constants.Constants.entity_account,
'account included in people tree on ldap')
# spread for securimaster export
spread_uit_securimaster = _SpreadCode(
'securimaster',
Constants.Constants.entity_account,
'account to be exported to securimaster')
# spread for portal export
spread_uit_portal = _SpreadCode(
'portal export',
Constants.Constants.entity_account,
'account to be exported to the portal')
# spread for paga export - account uit should be exported to paga
spread_uit_paga = _SpreadCode(
'paga export',
Constants.Constants.entity_account,
'account to have its uid exported to paga')
# spread for fs export - account should have email and uid exported to FS
spread_uit_fs = _SpreadCode(
'fs export',
Constants.Constants.entity_account,
'account to have its uid and email exported to fs')
spread_uit_ad_account = _SpreadCode(
'AD_account',
Constants.Constants.entity_account,
'account included in Active Directory')
spread_uit_ad_group = _SpreadCode(
'AD_group',
Constants.Constants.entity_group,
'group included in Active Directory')
spread_uit_ad_lit_admingroup = _SpreadCode(
'AD_group_litadmn',
Constants.Constants.entity_group,
'AD admingroup for local IT')
# Spreads for Exchange
spread_uit_exchange = _SpreadCode(
'exchange_mailbox',
Constants.Constants.entity_account,
'Accounts with exchange mailbox')
# sito spread
spread_sito = _SpreadCode(
'SITO',
Constants.Constants.entity_account,
'Accounts generated for sito users')
spread_fronter_dotcom = _SpreadCode(
'CF@fronter.com',
Constants.Constants.entity_group,
'Group representing a course that should be exported to the '
'ClassFronter instance on fronter.com. Should only be given to '
'groups that have been automatically generated from FS.')
spread_ephorte_person = _SpreadCode(
'ePhorte_person',
Constants.Constants.entity_person,
'Person included in ePhorte export')
#
# Email
#
email_server_type_exchange_imap = _EmailServerTypeCode(
'exchange_imap',
"Server is an Exchange server")
#
# Quarantines
#
quarantine_ou_notvalid = _QuarantineCode(
'ou_notvalid',
'OU not valid from external source')
quarantine_auto_emailonly = _QuarantineCode(
'auto_kunepost',
'Ikke ordinær student, tilgang til bare e-post')
quarantine_auto_inaktiv = _QuarantineCode(
'auto_inaktiv',
'Ikke aktiv student, utestengt')
quarantine_tilbud = _QuarantineCode(
'Tilbud',
"Pre-generert konto til studenter som har fått studietilbud,"
"men som ikke har aktivert kontoen.")
quarantine_sys_x_approved = _QuarantineCode(
'sys-x_approved',
'Konto fra system-x som ikke er godkjent')
quarantine_generell = _QuarantineCode(
'generell',
'Generell splatt')
quarantine_system = _QuarantineCode(
'system',
'Systembrukar som ikke skal logge inn')
quarantine_permisjon = _QuarantineCode(
'permisjon',
'Brukeren har permisjon')
quarantine_svakt_passord = _QuarantineCode(
'svakt_passord',
'For dårlig passord')
quarantine_autopassord = _QuarantineCode(
'autopassord',
'Passord ikke skiftet trass pålegg')
quarantine_sut_disk_usage = _QuarantineCode(
'sut_disk',
"Bruker for mye disk på sut")
#
# Auth codes
#
auth_type_md5_crypt_hex = _AuthenticationCode(
'MD5-crypt2',
"MD5-derived 32 bit password non unix style, no salt")
auth_type_md5_b64 = _AuthenticationCode(
'MD5-crypt_base64',
"MD5-derived 32 bit password base 64 encoded")
#
# Traits
#
trait_sito_registrar_notified = _EntityTraitCode(
'sito_req_mailed',
Constants.Constants.entity_account,
"Trait set on account when sito processing is done"
)
trait_sito_user_notified = _EntityTraitCode(
'sito_user_mailed',
Constants.Constants.entity_account,
"Trait set on account after account created mail is sent to user"
)
trait_sysx_registrar_notified = _EntityTraitCode(
'sysx_reg_mailed',
Constants.Constants.entity_account,
"Trait set on account when systemx processing is done"
)
trait_sysx_user_notified = _EntityTraitCode(
'sysx_user_mailed',
Constants.Constants.entity_account,
"Trait set on account after account created mail is sent to user"
)
trait_primary_aff = _EntityTraitCode(
"primary_aff",
Constants.Constants.entity_person,
"A person's chosen primary affiliation,"
" for use at the web presentations"
)
trait_nsd_unit_code = _EntityTraitCode(
'nsd_unit_code',
Constants.Constants.entity_ou,
'NSD unit code (Avdelingskode) to use for this OU'
)
|
django-json-api/rest_framework_ember | refs/heads/master | docs/conf.py | 2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Django REST Framework JSON API documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 24 23:31:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
import django
from sphinx.ext.apidoc import main
from rest_framework_json_api import VERSION
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_SETTINGS_MODULE"] = "example.settings"
django.setup()
# Auto-generate API documentation.
main(["-o", "apidoc", "-f", "-e", "-T", "-M", "../rest_framework_json_api"])
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "recommonmark"]
autodoc_member_order = "bysource"
autodoc_inherit_docstrings = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Django REST Framework JSON API"
year = datetime.date.today().year
copyright = "{}, Django REST Framework JSON API contributors".format(year)
author = "Django REST Framework JSON API contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "pull_request_template.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "DjangoRESTFrameworkJSONAPIdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"DjangoRESTFrameworkJSONAPI.tex",
"Django REST Framework JSON API Documentation",
"Django REST Framework JSON API contributors",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"djangorestframeworkjsonapi",
"Django REST Framework JSON API Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"DjangoRESTFrameworkJSONAPI",
"Django REST Framework JSON API Documentation",
author,
"DjangoRESTFrameworkJSONAPI",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
hyrole/scrapy | refs/heads/master | scrapy/contrib/downloadermiddleware/httpcompression.py | 144 | import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.downloadermiddleware.httpcompression` is deprecated, "
"use `scrapy.downloadermiddlewares.httpcompression` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.downloadermiddlewares.httpcompression import *
|
cjdelisle/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/actions/gyptest-generated-header.py | 187 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that dependencies on generated headers work, even if the header has
a mixed-case file name.
"""
import TestGyp
test = TestGyp.TestGyp()
CHDIR = 'generated-header'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'program', chdir=CHDIR)
test.up_to_date('test.gyp', 'program', chdir=CHDIR)
expect = 'foobar output\n'
test.run_built_executable('program', chdir=CHDIR, stdout=expect)
# Change what's written to the generated header, regyp and rebuild, and check
# that the change makes it to the executable and that the build is clean.
test.sleep()
test.write('generated-header/test.gyp',
test.read('generated-header/test.gyp').replace('foobar', 'barbaz'))
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'program', chdir=CHDIR)
test.up_to_date('test.gyp', 'program', chdir=CHDIR)
expect = 'barbaz output\n'
test.run_built_executable('program', chdir=CHDIR, stdout=expect)
test.pass_test()
|
tokyo-jesus/tamactiluya | refs/heads/master | tamactiluya/models.py | 3 | # -*- coding: utf-8 -*-
from flask_login import UserMixin
from tembodb import TemboDB, FileType
from tembodb.metadata import Text, Date, Password
from tembodb.metadata import Payload, RevisionNumber, Extension
db = TemboDB()
db.use_locks = True
@db.register_file_type('user')
class User(FileType, UserMixin):
uname = Text()
passwd = Password()
registered_on = Date()
real_name = Text()
url = Text()
email = Text()
bio = Text()
# Flask-Login methods
def authenticate(self, passwd: str) -> bool:
"""
Verifies that the current user exists and that the passwords match.
Make sure you call `save()` on new users before authentication or this check
will fail.
"""
if self.passwd == passwd and User(self.uname):
return True
else:
return False
def get_id(self) -> str:
return self.uname
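# A minimal usage sketch (assumes TemboDB persists instances via save(),
# as hinted at in the authenticate() docstring; all values illustrative):
#
# user = User('alice')
# user.passwd = 'hunter2'
# user.save()
# user.authenticate('hunter2') # -> True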
@db.register_file_type('article')
class Article(FileType):
revision = RevisionNumber()
title = Text()
edited_by = Text()
edited_on = Date()
content = Payload()
extension = Extension("md")
|
krez13/scikit-learn | refs/heads/master | sklearn/metrics/pairwise.py | 14 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
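# Illustrative behaviour (a sketch): with Y=None the second return value
# is the very same array object as the first, not a copy.
# >>> X, Y = check_pairwise_arrays([[0., 1.]], None)
# >>> Y is X
# True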
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
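# A quick numerical sanity check of the dot-product expansion used above
# (a sketch; uses only numpy and scipy, both imported at the top):
# >>> rng = np.random.RandomState(0)
# >>> A = rng.rand(5, 3)
# >>> np.allclose(euclidean_distances(A), distance.cdist(A, A))
# True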
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
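# Illustrative check of the note above (a sketch): for unit-norm rows,
# .5 * ||x - y||^2 equals 1 - <x, y>, which is the cosine distance.
# >>> rng = np.random.RandomState(0)
# >>> A, B = rng.rand(4, 3), rng.rand(4, 3)
# >>> An, Bn = normalize(A), normalize(B)
# >>> np.allclose(paired_cosine_distances(A, B), 1 - (An * Bn).sum(axis=1))
# True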
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
        If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
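# Worked example (illustrative, not part of the original module): with
# gamma=1, coef0=1, degree=2 and <X, Y> = 1*3 + 2*4 = 11, the kernel value
# is (1 * 11 + 1) ** 2 = 144.
#
# >>> import numpy as np
# >>> polynomial_kernel(np.array([[1., 2.]]), np.array([[3., 4.]]),
# ...                   degree=2, gamma=1, coef0=1)
# array([[ 144.]])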
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
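# Worked example (illustrative, not part of the original module): the kernel
# of a point with itself is exp(0) = 1, and with a point at squared distance
# 1 it is exp(-gamma * 1).
#
# >>> import numpy as np
# >>> rbf_kernel(np.array([[0., 0.]]), np.array([[0., 0.], [1., 0.]]),
# ...            gamma=1.)  # doctest: +ELLIPSIS
# array([[ 1.        ,  0.36787944...]])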
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
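# Illustrative sketch (not part of the original module): parallel rows have
# similarity 1; [1, 1] against [1, 0] gives 1/sqrt(2).
#
# >>> import numpy as np
# >>> cosine_similarity(np.array([[1., 1.]]),
# ...                   np.array([[2., 2.], [1., 0.]]))  # doctest: +ELLIPSIS
# array([[ 1.        ,  0.70710678...]])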
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
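# Worked example (illustrative, not part of the original module): for
# x = [1, 0] and y = [0, 1] the additive statistic is 1/1 + 1/1 = 2, so
# chi2_kernel returns exp(-gamma * 2).
#
# >>> import numpy as np
# >>> chi2_kernel(np.array([[1., 0.]]), np.array([[0., 1.]]),
# ...             gamma=1.)  # doctest: +ELLIPSIS
# array([[ 0.13533528...]])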
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
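# Illustrative usage sketch (not part of the original module): a symmetric
# distance matrix with zeros on the diagonal.
#
# >>> import numpy as np
# >>> pairwise_distances(np.array([[0., 0.], [1., 1.]]), metric='manhattan')
# array([[ 0.,  2.],
#        [ 2.,  0.]])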
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the functions they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
        ['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
         'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
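# Illustrative usage sketch (not part of the original module): with the
# 'linear' metric the result is simply the Gram matrix of dot products.
#
# >>> import numpy as np
# >>> pairwise_kernels(np.array([[1., 0.], [0., 1.]]), metric='linear')
# array([[ 1.,  0.],
#        [ 0.,  1.]])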
|
analyseuc3m/ANALYSE-v1 | refs/heads/master | openedx/core/djangoapps/programs/migrations/0003_auto_20151120_1613.py | 83 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('programs', '0002_programsapiconfig_cache_ttl'),
]
operations = [
migrations.AddField(
model_name='programsapiconfig',
name='authoring_app_css_path',
field=models.CharField(
max_length=255,
help_text='This value is required in order to enable the Studio authoring interface.',
verbose_name="Path to authoring app's CSS",
blank=True
),
),
migrations.AddField(
model_name='programsapiconfig',
name='authoring_app_js_path',
field=models.CharField(
max_length=255,
help_text='This value is required in order to enable the Studio authoring interface.',
verbose_name="Path to authoring app's JS",
blank=True
),
),
migrations.AddField(
model_name='programsapiconfig',
name='enable_studio_tab',
field=models.BooleanField(default=False, verbose_name='Enable Studio Authoring Interface'),
),
migrations.AlterField(
model_name='programsapiconfig',
name='enable_student_dashboard',
field=models.BooleanField(default=False, verbose_name='Enable Student Dashboard Displays'),
),
]
|
xxsergzzxx/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/test_pty.py | 48 | from test.support import verbose, run_unittest, import_module, reap_children
#Skip these tests if either fcntl or termios is not available
fcntl = import_module('fcntl')
import_module('termios')
import errno
import pty
import os
import sys
import signal
import unittest
TEST_STRING_1 = b"I wish to buy a fish license.\n"
TEST_STRING_2 = b"For my pet fish, Eric.\n"
if verbose:
def debug(msg):
print(msg)
else:
def debug(msg):
pass
def normalize_output(data):
# Some operating systems do conversions on newline. We could possibly
# fix that by doing the appropriate termios.tcsetattr()s. I couldn't
# figure out the right combo on Tru64 and I don't have an IRIX box.
# So just normalize the output and doc the problem O/Ses by allowing
# certain combinations for some platforms, but avoid allowing other
# differences (like extra whitespace, trailing garbage, etc.)
# This is about the best we can do without getting some feedback
    # from someone more knowledgeable.
# OSF/1 (Tru64) apparently turns \n into \r\r\n.
if data.endswith(b'\r\r\n'):
return data.replace(b'\r\r\n', b'\n')
# IRIX apparently turns \n into \r\n.
if data.endswith(b'\r\n'):
return data.replace(b'\r\n', b'\n')
return data
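# For example (illustrative):
# >>> normalize_output(b'hello\r\r\n')
# b'hello\n'
# >>> normalize_output(b'hello\r\n')
# b'hello\n'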
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz): these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
def setUp(self):
# isatty() and close() can hang on some platforms. Set an alarm
# before running the test to make sure we don't hang forever.
self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
signal.alarm(10)
def tearDown(self):
# remove alarm, restore old alarm handler
signal.alarm(0)
signal.signal(signal.SIGALRM, self.old_alarm)
def handle_sig(self, sig, frame):
self.fail("isatty hung")
def test_basic(self):
try:
debug("Calling master_open()")
master_fd, slave_name = pty.master_open()
debug("Got master_fd '%d', slave_name '%s'" %
(master_fd, slave_name))
debug("Calling slave_open(%r)" % (slave_name,))
slave_fd = pty.slave_open(slave_name)
debug("Got slave_fd '%d'" % slave_fd)
except OSError:
# " An optional feature could not be imported " ... ?
raise unittest.SkipTest("Pseudo-terminals (seemingly) not functional.")
self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')
# Solaris requires reading the fd before anything is returned.
# My guess is that since we open and close the slave fd
# in master_open(), we need to read the EOF.
# Ensure the fd is non-blocking in case there's nothing to read.
orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
try:
s1 = os.read(master_fd, 1024)
self.assertEqual(b'', s1)
except OSError as e:
if e.errno != errno.EAGAIN:
raise
# Restore the original flags.
fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
s1 = os.read(master_fd, 1024)
self.assertEqual(b'I wish to buy a fish license.\n',
normalize_output(s1))
debug("Writing chunked output")
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
s2 = os.read(master_fd, 1024)
self.assertEqual(b'For my pet fish, Eric.\n', normalize_output(s2))
os.close(slave_fd)
os.close(master_fd)
def test_fork(self):
debug("calling pty.fork()")
pid, master_fd = pty.fork()
if pid == pty.CHILD:
# stdout should be connected to a tty.
if not os.isatty(1):
debug("Child's fd 1 is not a tty?!")
os._exit(3)
# After pty.fork(), the child should already be a session leader.
# (on those systems that have that concept.)
debug("In child, calling os.setsid()")
try:
os.setsid()
except OSError:
# Good, we already were session leader
debug("Good: OSError was raised.")
pass
except AttributeError:
# Have pty, but not setsid()?
debug("No setsid() available?")
pass
except:
# We don't want this error to propagate, escaping the call to
# os._exit() and causing very peculiar behavior in the calling
# regrtest.py !
# Note: could add traceback printing here.
debug("An unexpected error was raised.")
os._exit(1)
else:
debug("os.setsid() succeeded! (bad!)")
os._exit(2)
os._exit(4)
else:
debug("Waiting for child (%d) to finish." % pid)
# In verbose mode, we have to consume the debug output from the
# child or the child will block, causing this test to hang in the
# parent's waitpid() call. The child blocks after a
# platform-dependent amount of data is written to its fd. On
# Linux 2.6, it's 4000 bytes and the child won't block, but on OS
# X even the small writes in the child above will block it. Also
# on Linux, the read() will throw an OSError (input/output error)
# when it tries to read past the end of the buffer but the child's
# already exited, so catch and discard those exceptions. It's not
# worth checking for EIO.
while True:
try:
data = os.read(master_fd, 80)
except OSError:
break
if not data:
break
sys.stdout.write(str(data.replace(b'\r\n', b'\n'),
encoding='ascii'))
##line = os.read(master_fd, 80)
##lines = line.replace('\r\n', '\n').split('\n')
##if False and lines != ['In child, calling os.setsid()',
## 'Good: OSError was raised.', '']:
## raise TestFailed("Unexpected output from child: %r" % line)
(pid, status) = os.waitpid(pid, 0)
res = status >> 8
debug("Child (%d) exited with status %d (%d)." % (pid, res, status))
if res == 1:
self.fail("Child raised an unexpected exception in os.setsid()")
elif res == 2:
self.fail("pty.fork() failed to make child a session leader.")
elif res == 3:
self.fail("Child spawned by pty.fork() did not have a tty as stdout")
elif res != 4:
self.fail("pty.fork() failed for unknown reasons.")
##debug("Reading from master_fd now that the child has exited")
##try:
## s1 = os.read(master_fd, 1024)
##except os.error:
## pass
##else:
## raise TestFailed("Read from master_fd did not raise exception")
os.close(master_fd)
# pty.fork() passed.
def test_main(verbose=None):
try:
run_unittest(PtyTest)
finally:
reap_children()
if __name__ == "__main__":
test_main()
|
nirmeshk/oh-mainline | refs/heads/master | vendor/packages/gdata/src/gdata/Crypto/PublicKey/__init__.py | 273 | """Public-key encryption and signature algorithms.
Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.
Crypto.PublicKey.DSA Digital Signature Algorithm. (Signature only)
Crypto.PublicKey.ElGamal (Signing and encryption)
Crypto.PublicKey.RSA (Signing, encryption, and blinding)
Crypto.PublicKey.qNEW (Signature only)
"""
__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW']
__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $"
|
dvliman/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.5/tests/regressiontests/model_fields/models.py | 47 | import os
import tempfile
# Try to import PIL in either of the two ways it can end up installed.
# Checking for the existence of Image is enough for CPython, but for PyPy,
# you need to check for the underlying modules.
try:
from PIL import Image, _imaging
except ImportError:
try:
import Image, _imaging
except ImportError:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
return Foo.objects.get(id=1)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
CHOICES = (
('Group 1', (
(1,'First'),
(2,'Second'),
)
),
('Group 2', (
(3,'Third'),
(4,'Fourth'),
)
),
(0,'Other'),
)
c = models.IntegerField(choices=CHOICES, null=True)
class BigD(models.Model):
d = models.DecimalField(max_digits=38, decimal_places=30)
class BigS(models.Model):
s = models.SlugField(max_length=255)
class BigInt(models.Model):
value = models.BigIntegerField()
null_value = models.BigIntegerField(null = True, blank = True)
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
class NullBooleanModel(models.Model):
nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
bfield = models.BooleanField()
string = models.CharField(max_length=10, default='abc')
class FksToBooleans(models.Model):
"""Model wih FKs to models with {Null,}BooleanField's, #15040"""
bf = models.ForeignKey(BooleanModel)
nbf = models.ForeignKey(NullBooleanModel)
class RenamedField(models.Model):
modelname = models.IntegerField(name="fieldname", choices=((1,'One'),))
class VerboseNameField(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2")
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
field5 = models.DateField("verbose field5")
field6 = models.DateTimeField("verbose field6")
field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
field8 = models.EmailField("verbose field8")
field9 = models.FileField("verbose field9", upload_to="unused")
field10 = models.FilePathField("verbose field10")
field11 = models.FloatField("verbose field11")
# Don't want to depend on PIL in this test
#field_image = models.ImageField("verbose field")
field12 = models.IntegerField("verbose field12")
field13 = models.IPAddressField("verbose field13")
field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
field15 = models.NullBooleanField("verbose field15")
field16 = models.PositiveIntegerField("verbose field16")
field17 = models.PositiveSmallIntegerField("verbose field17")
field18 = models.SlugField("verbose field18")
field19 = models.SmallIntegerField("verbose field19")
field20 = models.TextField("verbose field20")
field21 = models.TimeField("verbose field21")
field22 = models.URLField("verbose field22")
# This model isn't used in any test, just here to ensure it validates successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
d = models.DecimalField(max_digits=3, decimal_places=3)
###############################################################################
# FileField
class Document(models.Model):
myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If PIL available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args,**kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class PersonWithHeight(models.Model):
"""
Model that defines an ImageField with only one dimension field.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
###############################################################################
|
methane/uwsgi | refs/heads/master | plugins/graylog2/uwsgiplugin.py | 21 | NAME = 'graylog2'
CFLAGS = []
LDFLAGS = []
LIBS = ['-lz']
GCC_LIST = ['graylog2_plugin']
|
aidanlister/django | refs/heads/master | tests/model_formsets/test_uuid.py | 466 | from django.forms.models import inlineformset_factory
from django.test import TestCase
from .models import (
AutoPKChildOfUUIDPKParent, AutoPKParent, ChildRelatedViaAK,
ChildWithEditablePK, ParentWithUUIDAlternateKey, UUIDPKChild,
UUIDPKChildOfAutoPKParent, UUIDPKParent,
)
class InlineFormsetTests(TestCase):
def test_inlineformset_factory_nulls_default_pks(self):
"""
#24377 - If we're adding a new object, a parent's auto-generated pk
from the model field default should be ignored as it's regenerated on
the save request.
Tests the case where both the parent and child have a UUID primary key.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_ignores_default_pks_on_submit(self):
"""
#24377 - Inlines with a model field default should ignore that default
value to avoid triggering validation on empty forms.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet({
'uuidpkchild_set-TOTAL_FORMS': 3,
'uuidpkchild_set-INITIAL_FORMS': 0,
'uuidpkchild_set-MAX_NUM_FORMS': '',
'uuidpkchild_set-0-name': 'Foo',
'uuidpkchild_set-1-name': '',
'uuidpkchild_set-2-name': '',
})
self.assertTrue(formset.is_valid())
def test_inlineformset_factory_nulls_default_pks_uuid_parent_auto_child(self):
"""
#24958 - Variant of test_inlineformset_factory_nulls_default_pks for
the case of a parent object with a UUID primary key and a child object
with an AutoField primary key.
"""
FormSet = inlineformset_factory(UUIDPKParent, AutoPKChildOfUUIDPKParent, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_nulls_default_pks_auto_parent_uuid_child(self):
"""
#24958 - Variant of test_inlineformset_factory_nulls_default_pks for
the case of a parent object with an AutoField primary key and a child
object with a UUID primary key.
"""
FormSet = inlineformset_factory(AutoPKParent, UUIDPKChildOfAutoPKParent, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_nulls_default_pks_child_editable_pk(self):
"""
#24958 - Variant of test_inlineformset_factory_nulls_default_pks for
the case of a parent object with a UUID primary key and a child
object with an editable natural key for a primary key.
"""
FormSet = inlineformset_factory(UUIDPKParent, ChildWithEditablePK, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_nulls_default_pks_alternate_key_relation(self):
"""
#24958 - Variant of test_inlineformset_factory_nulls_default_pks for
the case of a parent object with a UUID alternate key and a child
object that relates to that alternate key.
"""
FormSet = inlineformset_factory(ParentWithUUIDAlternateKey, ChildRelatedViaAK, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
|
rendermotion/RMMel | refs/heads/master | creators/loft.py | 2 | from RMPY import nameConvention
from RMPY.core import config
import pymel.core as pm
from RMPY.creators import creatorsBase
from RMPY.creators import curve
from RMPY.creators import spaceLocator
class Creator(creatorsBase.Creator):
def __init__(self, *args, **kwargs):
super(Creator, self).__init__(*args, **kwargs)
self.path = None
self.skin_surface = None
self.surface_radius = None
self.circle = None
    def surface_radius(self):
        # NOTE: this accessor is shadowed by the ``surface_radius`` attribute
        # assigned in __init__, and ``self._model`` is never defined on this
        # class; kept as-is since the intended backing object is unclear.
        return self._model.surface_radius
def point_base(self, *points, **kwargs):
super(Creator, self).point_base(*points, **kwargs)
radius = kwargs.pop('radius', 1)
delete_history = kwargs.pop('delete_history', True)
        # Bind the Curve creator to a distinct local name; reusing ``curve``
        # would shadow the imported module and raise UnboundLocalError.
        curve_creator = curve.Curve()
        locator = spaceLocator.Creator()
        self.path = curve_creator.point_base(*points, ep=True)
self.circle = pm.circle(radius=radius)[0]
self.name_conv.rename_name_in_format(self.circle, name='circle')
original_surf = pm.extrude(self.circle, self.path, fixedPath=True,
useComponentPivot=1, useProfileNormal=True)
swap_uv = pm.rebuildSurface(original_surf[0], keepControlPoints=True, rebuildType=0,
keepRange=0)
new_surface = pm.reverseSurface(swap_uv[0], direction=3)
self.skin_surface = new_surface[0]
self.name_conv.rename_name_in_format(self.skin_surface, name='extrude')
cps = pm.createNode('closestPointOnSurface')
measure_locator = locator.point_base(points[0])
measure_locator.worldPosition >> cps.inPosition
self.skin_surface.worldSpace[0] >> cps.inputSurface
u_closer_value = cps.parameterU.get()
pm.delete(measure_locator, cps)
if u_closer_value > .5:
new_surface = pm.reverseSurface(self.skin_surface, direction=0)
self.skin_surface = new_surface[0]
if delete_history:
self.delete_history()
return self.skin_surface
def curve_base(self, *path, **kwargs):
swap_uv = kwargs.pop('swap_uv', True)
radius = kwargs.pop('radius', 1)
delete_history = kwargs.pop('delete_history', False)
self.path = path[0]
self.circle = pm.circle(radius=radius)[0]
self.name_conv.rename_name_in_format(self.circle, name='circle')
original_surf = pm.extrude(self.circle, self.path, fixedPath=True,
useComponentPivot=1, useProfileNormal=True)
if swap_uv:
swap_uv_curve = pm.rebuildSurface(original_surf[0], keepControlPoints=True, rebuildType=0, keepRange=0)
new_surface = pm.reverseSurface(swap_uv_curve[0], direction=3)
self.skin_surface = new_surface[0]
        else:
            # pm.extrude() returns [surface, extrudeNode]; keep only the
            # surface so both branches store a single transform node.
            self.skin_surface = original_surf[0]
self.name_conv.rename_name_in_format(self.skin_surface, name='extrude')
if delete_history:
self.delete_history()
return self.skin_surface
def delete_history(self):
pm.delete(self.skin_surface, constructionHistory=True)
pm.delete(self.path)
pm.delete(self.circle)
def uniform_rebuild(self, number_of_points):
rebuild_surf = pm.rebuildSurface(self.skin_surface, rebuildType=0,
spansV=number_of_points,
keepRange=True)
self.skin_surface = rebuild_surf[0]
if __name__ == '__main__':
selection = pm.ls(selection=True)
nurbs_surface = Creator()
    nurbs_surface.curve_base(*selection)  # curve_base() needs a curve path; use the selection
#nurbs_surface.point_base(*selection, ep=True, delete_history=False)
#nurbs_surface.uniform_rebuild(10) |
moyogo/robofontmechanic | refs/heads/master | Mechanic.roboFontExt/lib/mechanic/ui/text.py | 6 | from AppKit import NSFontAttributeName, NSFont, NSMutableAttributedString, \
NSAttributedString
class Text(object):
"""Convenience class for returning NSAttributedStrings."""
@staticmethod
def regular(size):
return {NSFontAttributeName: NSFont.systemFontOfSize_(size)}
@staticmethod
def bold(size):
return {NSFontAttributeName: NSFont.boldSystemFontOfSize_(size)}
@classmethod
    def string(cls, text="", size=13, style="regular", mutable=False):
if mutable:
s = NSMutableAttributedString
else:
s = NSAttributedString
        weight = getattr(cls, style)(size)
return s.alloc().initWithString_attributes_(text, weight)
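# Illustrative usage sketch (not part of the original module; assumes a
# PyObjC/AppKit environment is available):
# >>> s = Text.string(text="Mechanic", size=12, style="bold")
# >>> s.string()
# 'Mechanic'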
|
heytcass/homeassistant-config | refs/heads/master | deps/fuzzywuzzy/StringMatcher.py | 17 | #!/usr/bin/env python
# encoding: utf-8
"""
StringMatcher.py
ported from python-Levenshtein
[https://github.com/miohtama/python-Levenshtein]
License available here: https://github.com/miohtama/python-Levenshtein/blob/master/COPYING
"""
from Levenshtein import *
from warnings import warn
class StringMatcher:
"""A SequenceMatcher-like class built on the top of Levenshtein"""
def _reset_cache(self):
self._ratio = self._distance = None
self._opcodes = self._editops = self._matching_blocks = None
def __init__(self, isjunk=None, seq1='', seq2=''):
if isjunk:
warn("isjunk not NOT implemented, it will be ignored")
self._str1, self._str2 = seq1, seq2
self._reset_cache()
def set_seqs(self, seq1, seq2):
self._str1, self._str2 = seq1, seq2
self._reset_cache()
def set_seq1(self, seq1):
self._str1 = seq1
self._reset_cache()
def set_seq2(self, seq2):
self._str2 = seq2
self._reset_cache()
def get_opcodes(self):
if not self._opcodes:
if self._editops:
self._opcodes = opcodes(self._editops, self._str1, self._str2)
else:
self._opcodes = opcodes(self._str1, self._str2)
return self._opcodes
def get_editops(self):
if not self._editops:
if self._opcodes:
self._editops = editops(self._opcodes, self._str1, self._str2)
else:
self._editops = editops(self._str1, self._str2)
return self._editops
def get_matching_blocks(self):
if not self._matching_blocks:
self._matching_blocks = matching_blocks(self.get_opcodes(),
self._str1, self._str2)
return self._matching_blocks
def ratio(self):
if not self._ratio:
self._ratio = ratio(self._str1, self._str2)
return self._ratio
def quick_ratio(self):
# This is usually quick enough :o)
if not self._ratio:
self._ratio = ratio(self._str1, self._str2)
return self._ratio
def real_quick_ratio(self):
len1, len2 = len(self._str1), len(self._str2)
return 2.0 * min(len1, len2) / (len1 + len2)
def distance(self):
if not self._distance:
self._distance = distance(self._str1, self._str2)
return self._distance
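# Illustrative usage sketch (assumes the python-Levenshtein package is
# installed):
# >>> m = StringMatcher(seq1="kitten", seq2="sitting")
# >>> m.distance()
# 3
# >>> round(m.ratio(), 4)
# 0.6154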
|
GageGaskins/osf.io | refs/heads/develop | scripts/osfstorage/settings/__init__.py | 105 | # encoding: utf-8
from .defaults import *
try:
from .local import *
except ImportError as error:
pass
|
fabian4/ceilometer | refs/heads/master | ceilometer/tests/functional/api/v2/test_statistics_scenarios.py | 7 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test events statistics retrieval."""
import datetime
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
class TestMaxProjectVolume(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestMaxProjectVolume, self).setUp()
for i in range(3):
s = sample.Sample(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id-%s' % i,
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.sample',
},
source='source1',
)
msg = utils.meter_message_from_counter(
s, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(7, data[0]['max'])
self.assertEqual(3, data[0]['count'])
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(7, data[0]['max'])
self.assertEqual(2, data[0]['count'])
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual([], data)
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(5, data[0]['max'])
self.assertEqual(1, data[0]['count'])
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual([], data)
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(6, data[0]['max'])
self.assertEqual(1, data[0]['count'])
class TestMaxResourceVolume(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestMaxResourceVolume, self).setUp()
for i in range(3):
s = sample.Sample(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.sample',
},
source='source1',
)
msg = utils.meter_message_from_counter(
s, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
}])
self.assertEqual(7, data[0]['max'])
self.assertEqual(3, data[0]['count'])
def test_no_time_bounds_with_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'resource_id',
'value': 'resource-id'}],
period=3600)
self.assertEqual(3, len(data))
self.assertEqual(set([u'2012-09-25T10:30:00',
u'2012-09-25T12:32:00',
u'2012-09-25T11:31:00']),
set(x['duration_start'] for x in data))
self.assertEqual(3600, data[0]['period'])
self.assertEqual(set([u'2012-09-25T10:30:00',
u'2012-09-25T11:30:00',
u'2012-09-25T12:30:00']),
set(x['period_start'] for x in data))
def test_period_with_negative_value(self):
resp = self.get_json(self.PATH, expect_errors=True,
q=[{'field': 'resource_id',
'value': 'resource-id'}],
period=-1)
self.assertEqual(400, resp.status_code)
@tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase', 'db2')
def test_period_with_large_value(self):
resp = self.get_json(self.PATH, expect_errors=True,
q=[{'field': 'user_id',
'value': 'user-id'}],
period=10000000000000)
self.assertEqual(400, resp.status_code)
self.assertIn(b"Invalid period", resp.body)
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(7, data[0]['max'])
self.assertEqual(2, data[0]['count'])
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual([], data)
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(5, data[0]['max'])
self.assertEqual(1, data[0]['count'])
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual([], data)
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(6, data[0]['max'])
self.assertEqual(1, data[0]['count'])
class TestSumProjectVolume(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestSumProjectVolume, self).setUp()
for i in range(3):
s = sample.Sample(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id-%s' % i,
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.sample',
},
source='source1',
)
msg = utils.meter_message_from_counter(
s, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
}])
expected = 5 + 6 + 7
self.assertEqual(expected, data[0]['sum'])
self.assertEqual(3, data[0]['count'])
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
])
expected = 6 + 7
self.assertEqual(expected, data[0]['sum'])
self.assertEqual(2, data[0]['count'])
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
},
])
self.assertEqual([], data)
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
},
])
self.assertEqual(5, data[0]['sum'])
self.assertEqual(1, data[0]['count'])
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
},
])
self.assertEqual([], data)
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'project_id',
'value': 'project1',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:32:00',
},
])
self.assertEqual(6, data[0]['sum'])
self.assertEqual(1, data[0]['count'])
class TestSumResourceVolume(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/volume.size/statistics'
def setUp(self):
super(TestSumResourceVolume, self).setUp()
for i in range(3):
s = sample.Sample(
'volume.size',
'gauge',
'GiB',
5 + i,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
resource_metadata={'display_name': 'test-volume',
'tag': 'self.sample',
},
source='source1',
)
msg = utils.meter_message_from_counter(
s, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_no_time_bounds(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
}])
self.assertEqual(5 + 6 + 7, data[0]['sum'])
self.assertEqual(3, data[0]['count'])
def test_no_time_bounds_with_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'resource_id',
'value': 'resource-id'}],
period=1800)
self.assertEqual(3, len(data))
self.assertEqual(set([u'2012-09-25T10:30:00',
u'2012-09-25T12:32:00',
u'2012-09-25T11:31:00']),
set(x['duration_start'] for x in data))
self.assertEqual(1800, data[0]['period'])
self.assertEqual(set([u'2012-09-25T10:30:00',
u'2012-09-25T11:30:00',
u'2012-09-25T12:30:00']),
set(x['period_start'] for x in data))
def test_start_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
}])
self.assertEqual(6 + 7, data[0]['sum'])
self.assertEqual(2, data[0]['count'])
def test_start_timestamp_with_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'resource_id',
'value': 'resource-id'},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T10:15:00'}],
period=7200)
self.assertEqual(2, len(data))
self.assertEqual(set([u'2012-09-25T10:30:00',
u'2012-09-25T12:32:00']),
set(x['duration_start'] for x in data))
self.assertEqual(7200, data[0]['period'])
self.assertEqual(set([u'2012-09-25T10:15:00',
u'2012-09-25T12:15:00']),
set(x['period_start'] for x in data))
def test_start_timestamp_after(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T12:34:00',
}])
self.assertEqual([], data)
def test_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T11:30:00',
}])
self.assertEqual(5, data[0]['sum'])
self.assertEqual(1, data[0]['count'])
def test_end_timestamp_before(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'le',
'value': '2012-09-25T09:54:00',
}])
self.assertEqual([], data)
def test_start_end_timestamp(self):
data = self.get_json(self.PATH, q=[{'field': 'resource_id',
'value': 'resource-id',
},
{'field': 'timestamp',
'op': 'ge',
'value': '2012-09-25T11:30:00',
},
{'field': 'timestamp',
'op': 'lt',
'value': '2012-09-25T11:32:00',
}])
self.assertEqual(6, data[0]['sum'])
self.assertEqual(1, data[0]['count'])
class TestGroupByInstance(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/instance/statistics'
def setUp(self):
super(TestGroupByInstance, self).setUp()
test_sample_data = (
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
'source': 'source-2'},
{'volume': 2, 'user': 'user-1', 'project': 'project-2',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
'source': 'source-2'},
{'volume': 1, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 1, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 2, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 4, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 4, 'user': 'user-3', 'project': 'project-1',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
'source': 'source-3'},
)
for test_sample in test_sample_data:
c = sample.Sample(
'instance',
sample.TYPE_CUMULATIVE,
unit='s',
volume=test_sample['volume'],
user_id=test_sample['user'],
project_id=test_sample['project'],
resource_id=test_sample['resource'],
timestamp=datetime.datetime(*test_sample['timestamp']),
resource_metadata={'flavor': test_sample['metadata_flavor'],
'event': test_sample['metadata_event'], },
source=test_sample['source'],
)
msg = utils.meter_message_from_counter(
c, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_group_by_user(self):
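        # per-user volumes from setUp: user-1 -> [2, 2],
        # user-2 -> [1, 1, 2, 4], user-3 -> [4]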
data = self.get_json(self.PATH, groupby=['user_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['user_id']), groupby_keys_set)
self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'user_id': 'user-1'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'user_id': 'user-2'}:
self.assertEqual(4, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(8, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'user_id': 'user-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
def test_group_by_resource(self):
data = self.get_json(self.PATH, groupby=['resource_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['resource_id']), groupby_keys_set)
self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'resource_id': 'resource-1'}:
self.assertEqual(3, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'resource_id': 'resource-2'}:
self.assertEqual(3, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'resource_id': 'resource-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
def test_group_by_project(self):
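        # per-project volumes from setUp: project-1 -> [2, 1, 1, 2, 4],
        # project-2 -> [2, 4]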
data = self.get_json(self.PATH, groupby=['project_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'project_id': 'project-1'}:
self.assertEqual(5, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(10, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'project_id': 'project-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(3, r['avg'])
def test_group_by_unknown_field(self):
response = self.get_json(self.PATH,
expect_errors=True,
groupby=['wtf'])
self.assertEqual(400, response.status_code)
def test_group_by_multiple_regular(self):
data = self.get_json(self.PATH, groupby=['user_id', 'resource_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set)
self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1',
'resource-2', 'resource-3']),
groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'user_id': 'user-1',
'resource_id': 'resource-1'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'user_id': 'user-2',
'resource_id': 'resource-1'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'user_id': 'user-2',
'resource_id': 'resource-2'}:
self.assertEqual(3, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'user_id': 'user-3',
'resource_id': 'resource-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
else:
self.assertNotEqual(grp, {'user_id': 'user-1',
'resource_id': 'resource-2'})
self.assertNotEqual(grp, {'user_id': 'user-1',
'resource_id': 'resource-3'})
self.assertNotEqual(grp, {'user_id': 'user-2',
'resource_id': 'resource-3'})
self.assertNotEqual(grp, {'user_id': 'user-3',
'resource_id': 'resource-1'})
self.assertNotEqual(grp, {'user_id': 'user-3',
'resource_id': 'resource-2'})
def test_group_by_with_query_filter(self):
data = self.get_json(self.PATH,
q=[{'field': 'project_id',
'op': 'eq',
'value': 'project-1'}],
groupby=['resource_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['resource_id']), groupby_keys_set)
self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'resource_id': 'resource-1'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'resource_id': 'resource-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(1, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(1, r['avg'])
elif grp == {'resource_id': 'resource-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
def test_group_by_with_query_filter_multiple(self):
data = self.get_json(self.PATH,
q=[{'field': 'user_id',
'op': 'eq',
'value': 'user-2'},
{'field': 'source',
'op': 'eq',
'value': 'source-1'}],
groupby=['project_id', 'resource_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2',
'resource-1', 'resource-2']),
groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'project_id': 'project-1',
'resource_id': 'resource-1'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'project_id': 'project-1',
'resource_id': 'resource-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(1, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(1, r['avg'])
elif grp == {'project_id': 'project-2',
'resource_id': 'resource-2'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
else:
self.assertNotEqual(grp, {'project_id': 'project-2',
'resource_id': 'resource-1'})
def test_group_by_with_period(self):
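        # 7200 s buckets are anchored at the earliest sample (10:11);
        # empty buckets (e.g. the one starting at 12:11) are omitted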
data = self.get_json(self.PATH,
groupby=['project_id'],
period=7200)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
period_start_set = set(sub_dict['period_start'] for sub_dict in data)
period_start_valid = set([u'2013-08-01T10:11:00',
u'2013-08-01T14:11:00',
u'2013-08-01T16:11:00'])
self.assertEqual(period_start_valid, period_start_set)
for r in data:
grp = r['groupby']
period_start = r['period_start']
if (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T10:11:00'):
self.assertEqual(3, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(4260, r['duration'])
self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
self.assertEqual(u'2013-08-01T11:22:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T12:11:00', r['period_end'])
elif (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T14:11:00'):
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(4260, r['duration'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
self.assertEqual(u'2013-08-01T16:10:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
elif (grp == {'project_id': 'project-2'} and
period_start == u'2013-08-01T14:11:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T15:37:00', r['duration_start'])
self.assertEqual(u'2013-08-01T15:37:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
elif (grp == {'project_id': 'project-2'} and
period_start == u'2013-08-01T16:11:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T18:11:00', r['period_end'])
else:
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-1'},
u'2013-08-01T16:11:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T10:11:00'])
def test_group_by_with_query_filter_and_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'source',
'op': 'eq',
'value': 'source-1'}],
groupby=['project_id'],
period=7200)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
period_start_set = set(sub_dict['period_start'] for sub_dict in data)
period_start_valid = set([u'2013-08-01T10:11:00',
u'2013-08-01T14:11:00',
u'2013-08-01T16:11:00'])
self.assertEqual(period_start_valid, period_start_set)
for r in data:
grp = r['groupby']
period_start = r['period_start']
if (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T10:11:00'):
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(1, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(1, r['avg'])
self.assertEqual(1740, r['duration'])
self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
self.assertEqual(u'2013-08-01T10:40:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T12:11:00', r['period_end'])
elif (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T14:11:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
elif (grp == {'project_id': 'project-2'} and
period_start == u'2013-08-01T16:11:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T18:11:00', r['period_end'])
else:
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-1'},
u'2013-08-01T16:11:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T10:11:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T14:11:00'])
def test_group_by_start_timestamp_after(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T17:28:01'}],
groupby=['project_id'])
self.assertEqual([], data)
def test_group_by_end_timestamp_before(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T10:10:59'}],
groupby=['project_id'])
self.assertEqual([], data)
def test_group_by_start_timestamp(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T14:58:00'}],
groupby=['project_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'project_id': 'project-1'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'project_id': 'project-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(3, r['avg'])
def test_group_by_end_timestamp(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T11:45:00'}],
groupby=['project_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'project_id': 'project-1'}:
self.assertEqual(3, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(2, r['avg'])
def test_group_by_start_end_timestamp(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T08:17:03'},
{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T23:59:59'}],
groupby=['project_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'project_id': 'project-1'}:
self.assertEqual(5, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(10, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'project_id': 'project-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(6, r['sum'])
self.assertEqual(3, r['avg'])
def test_group_by_start_end_timestamp_with_query_filter(self):
data = self.get_json(self.PATH,
q=[{'field': 'project_id',
'op': 'eq',
'value': 'project-1'},
{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T11:01:00'},
{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T20:00:00'}],
groupby=['resource_id'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['resource_id']), groupby_keys_set)
self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'resource_id': 'resource-1'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'resource_id': 'resource-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
def test_group_by_start_end_timestamp_with_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T14:00:00'},
{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T17:00:00'}],
groupby=['project_id'],
period=3600)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
period_start_set = set(sub_dict['period_start'] for sub_dict in data)
period_start_valid = set([u'2013-08-01T14:00:00',
u'2013-08-01T15:00:00',
u'2013-08-01T16:00:00'])
self.assertEqual(period_start_valid, period_start_set)
for r in data:
grp = r['groupby']
period_start = r['period_start']
if (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T14:00:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
self.assertEqual(3600, r['period'])
self.assertEqual(u'2013-08-01T15:00:00', r['period_end'])
elif (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T16:00:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T16:10:00', r['duration_start'])
self.assertEqual(u'2013-08-01T16:10:00', r['duration_end'])
self.assertEqual(3600, r['period'])
self.assertEqual(u'2013-08-01T17:00:00', r['period_end'])
elif (grp == {'project_id': 'project-2'} and
period_start == u'2013-08-01T15:00:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T15:37:00', r['duration_start'])
self.assertEqual(u'2013-08-01T15:37:00', r['duration_end'])
self.assertEqual(3600, r['period'])
self.assertEqual(u'2013-08-01T16:00:00', r['period_end'])
else:
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-1'},
u'2013-08-01T15:00:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T14:00:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T16:00:00'])
def test_group_by_start_end_timestamp_with_query_filter_and_period(self):
data = self.get_json(self.PATH,
q=[{'field': 'source',
'op': 'eq',
'value': 'source-1'},
{'field': 'timestamp',
'op': 'ge',
'value': '2013-08-01T10:00:00'},
{'field': 'timestamp',
'op': 'le',
'value': '2013-08-01T18:00:00'}],
groupby=['project_id'],
period=7200)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
period_start_set = set(sub_dict['period_start'] for sub_dict in data)
period_start_valid = set([u'2013-08-01T10:00:00',
u'2013-08-01T14:00:00',
u'2013-08-01T16:00:00'])
self.assertEqual(period_start_valid, period_start_set)
for r in data:
grp = r['groupby']
period_start = r['period_start']
if (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T10:00:00'):
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(1, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(1, r['avg'])
self.assertEqual(1740, r['duration'])
self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
self.assertEqual(u'2013-08-01T10:40:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T12:00:00', r['period_end'])
elif (grp == {'project_id': 'project-1'} and
period_start == u'2013-08-01T14:00:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(2, r['sum'])
self.assertEqual(2, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T16:00:00', r['period_end'])
elif (grp == {'project_id': 'project-2'} and
period_start == u'2013-08-01T16:00:00'):
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
self.assertEqual(0, r['duration'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
self.assertEqual(7200, r['period'])
self.assertEqual(u'2013-08-01T18:00:00', r['period_end'])
else:
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-1'},
u'2013-08-01T16:00:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T10:00:00'])
self.assertNotEqual([grp, period_start],
[{'project_id': 'project-2'},
u'2013-08-01T14:00:00'])
@tests_db.run_with('mongodb', 'hbase', 'db2')
class TestGroupBySource(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
# FIXME(terriyu): We have to put test_group_by_source in its own class
# because SQLAlchemy currently doesn't support group by source statistics.
# When group by source is supported in SQLAlchemy, this test should be
# moved to TestGroupByInstance with all the other group by statistics
# tests.
PATH = '/meters/instance/statistics'
def setUp(self):
super(TestGroupBySource, self).setUp()
test_sample_data = (
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
'source': 'source-2'},
{'volume': 2, 'user': 'user-1', 'project': 'project-2',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
'source': 'source-2'},
{'volume': 1, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 1, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 2, 'user': 'user-2', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 4, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source-1'},
{'volume': 4, 'user': 'user-3', 'project': 'project-1',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
'source': 'source-3'},
)
for test_sample in test_sample_data:
c = sample.Sample(
'instance',
sample.TYPE_CUMULATIVE,
unit='s',
volume=test_sample['volume'],
user_id=test_sample['user'],
project_id=test_sample['project'],
resource_id=test_sample['resource'],
timestamp=datetime.datetime(*test_sample['timestamp']),
resource_metadata={'flavor': test_sample['metadata_flavor'],
'event': test_sample['metadata_event'], },
source=test_sample['source'],
)
msg = utils.meter_message_from_counter(
c, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def tearDown(self):
self.conn.clear()
super(TestGroupBySource, self).tearDown()
def test_group_by_source(self):
data = self.get_json(self.PATH, groupby=['source'])
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['source']), groupby_keys_set)
self.assertEqual(set(['source-1', 'source-2', 'source-3']),
groupby_vals_set)
for r in data:
grp = r['groupby']
if grp == {'source': 'source-1'}:
self.assertEqual(4, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(1, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(8, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'source': 'source-2'}:
self.assertEqual(2, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(2, r['min'])
self.assertEqual(2, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(2, r['avg'])
elif grp == {'source': 'source-3'}:
self.assertEqual(1, r['count'])
self.assertEqual('s', r['unit'])
self.assertEqual(4, r['min'])
self.assertEqual(4, r['max'])
self.assertEqual(4, r['sum'])
self.assertEqual(4, r['avg'])
class TestSelectableAggregates(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
PATH = '/meters/instance/statistics'
def setUp(self):
super(TestSelectableAggregates, self).setUp()
test_sample_data = (
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
'source': 'source'},
{'volume': 2, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
'source': 'source'},
{'volume': 1, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11),
'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 2, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 5, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 4, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 9, 'user': 'user-3', 'project': 'project-3',
'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-3',
'source': 'source'},
)
for test_sample in test_sample_data:
c = sample.Sample(
'instance',
sample.TYPE_GAUGE,
unit='instance',
volume=test_sample['volume'],
user_id=test_sample['user'],
project_id=test_sample['project'],
resource_id=test_sample['resource'],
timestamp=datetime.datetime(*test_sample['timestamp']),
resource_metadata={'flavor': test_sample['metadata_flavor'],
'event': test_sample['metadata_event'], },
source=test_sample['source'],
)
msg = utils.meter_message_from_counter(
c, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def _do_test_per_tenant_selectable_standard_aggregate(self,
aggregate,
expected_values):
agg_args = {'aggregate.func': aggregate}
data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
projects = ['project-1', 'project-2', 'project-3']
self.assertEqual(set(projects), groupby_vals_set)
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
for r in data:
grp = r['groupby']
for project in projects:
if grp == {'project_id': project}:
expected = expected_values[projects.index(project)]
self.assertEqual('instance', r['unit'])
self.assertAlmostEqual(r[aggregate], expected)
self.assertIn('aggregate', r)
self.assertIn(aggregate, r['aggregate'])
self.assertAlmostEqual(r['aggregate'][aggregate], expected)
for a in standard_aggregates - set([aggregate]):
self.assertNotIn(a, r)
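    # Per-project volumes recorded in setUp: project-1 -> [2, 2, 5],
    # project-2 -> [2, 1, 2, 4], project-3 -> [9]. The expected values
    # passed to the helper below follow directly from these lists.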
def test_per_tenant_selectable_max(self):
self._do_test_per_tenant_selectable_standard_aggregate('max',
[5, 4, 9])
def test_per_tenant_selectable_min(self):
self._do_test_per_tenant_selectable_standard_aggregate('min',
[2, 1, 9])
def test_per_tenant_selectable_sum(self):
self._do_test_per_tenant_selectable_standard_aggregate('sum',
[9, 9, 9])
def test_per_tenant_selectable_avg(self):
self._do_test_per_tenant_selectable_standard_aggregate('avg',
[3, 2.25, 9])
def test_per_tenant_selectable_count(self):
self._do_test_per_tenant_selectable_standard_aggregate('count',
[3, 4, 1])
def test_per_tenant_selectable_parameterized_aggregate(self):
agg_args = {'aggregate.func': 'cardinality',
'aggregate.param': 'resource_id'}
data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
projects = ['project-1', 'project-2', 'project-3']
self.assertEqual(set(projects), groupby_vals_set)
aggregate = 'cardinality/resource_id'
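        # distinct resource_ids per project in setUp:
        # project-1 -> {resource-1, resource-2}, project-2 -> {resource-3,
        # resource-4, resource-5}, project-3 -> {resource-4}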
expected_values = [2.0, 3.0, 1.0]
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
for r in data:
grp = r['groupby']
for project in projects:
if grp == {'project_id': project}:
expected = expected_values[projects.index(project)]
self.assertEqual('instance', r['unit'])
self.assertNotIn(aggregate, r)
self.assertIn('aggregate', r)
self.assertIn(aggregate, r['aggregate'])
self.assertEqual(expected, r['aggregate'][aggregate])
for a in standard_aggregates:
self.assertNotIn(a, r)
def test_large_quantum_selectable_parameterized_aggregate(self):
        # add a large number of datapoints that should not affect the
        # cardinality, checking that the computation logic tolerates
        # different DB behavior with many samples per period
for i in range(200):
s = sample.Sample(
'instance',
sample.TYPE_GAUGE,
unit='instance',
volume=i * 1.0,
user_id='user-1',
project_id='project-1',
resource_id='resource-1',
timestamp=datetime.datetime(2013, 8, 1, 11, i % 60),
resource_metadata={'flavor': 'm1.tiny',
'event': 'event-1', },
source='source',
)
msg = utils.meter_message_from_counter(
s, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
agg_args = {'aggregate.func': 'cardinality',
'aggregate.param': 'resource_id'}
data = self.get_json(self.PATH, **agg_args)
aggregate = 'cardinality/resource_id'
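        # the 200 extra samples above all reuse resource-1, so the distinct
        # resource_id count stays at the five resources created in setUp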
expected_value = 5.0
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
r = data[0]
self.assertNotIn(aggregate, r)
self.assertIn('aggregate', r)
self.assertIn(aggregate, r['aggregate'])
self.assertEqual(expected_value, r['aggregate'][aggregate])
for a in standard_aggregates:
self.assertNotIn(a, r)
def test_repeated_unparameterized_aggregate(self):
agg_params = 'aggregate.func=count&aggregate.func=count'
data = self.get_json(self.PATH, override_params=agg_params)
aggregate = 'count'
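        # setUp records eight samples in total; the duplicated
        # aggregate.func=count parameter must not double-count them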
expected_value = 8.0
standard_aggregates = set(['min', 'max', 'sum', 'avg'])
r = data[0]
self.assertIn(aggregate, r)
self.assertEqual(expected_value, r[aggregate])
self.assertIn('aggregate', r)
self.assertIn(aggregate, r['aggregate'])
self.assertEqual(expected_value, r['aggregate'][aggregate])
for a in standard_aggregates:
self.assertNotIn(a, r)
def test_fully_repeated_parameterized_aggregate(self):
agg_params = ('aggregate.func=cardinality&'
'aggregate.param=resource_id&'
'aggregate.func=cardinality&'
'aggregate.param=resource_id&')
data = self.get_json(self.PATH, override_params=agg_params)
aggregate = 'cardinality/resource_id'
expected_value = 5.0
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
r = data[0]
self.assertIn('aggregate', r)
self.assertNotIn(aggregate, r)
self.assertIn(aggregate, r['aggregate'])
self.assertEqual(expected_value, r['aggregate'][aggregate])
for a in standard_aggregates:
self.assertNotIn(a, r)
def test_partially_repeated_parameterized_aggregate(self):
agg_params = ('aggregate.func=cardinality&'
'aggregate.param=resource_id&'
'aggregate.func=cardinality&'
'aggregate.param=project_id&')
data = self.get_json(self.PATH, override_params=agg_params)
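        # setUp uses five distinct resource_ids and three distinct project_ids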
expected_values = {'cardinality/resource_id': 5.0,
'cardinality/project_id': 3.0}
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
r = data[0]
self.assertIn('aggregate', r)
for aggregate in expected_values.keys():
self.assertNotIn(aggregate, r)
self.assertIn(aggregate, r['aggregate'])
self.assertEqual(expected_values[aggregate],
r['aggregate'][aggregate])
for a in standard_aggregates:
self.assertNotIn(a, r)
def test_bad_selectable_parameterized_aggregate(self):
agg_args = {'aggregate.func': 'cardinality',
'aggregate.param': 'injection_attack'}
resp = self.get_json(self.PATH, status=[400],
groupby=['project_id'], **agg_args)
self.assertIn('error_message', resp)
self.assertEqual(resp['error_message'].get('faultcode'),
'Client')
self.assertEqual(resp['error_message'].get('faultstring'),
'Bad aggregate: cardinality.injection_attack')
@tests_db.run_with('mongodb', 'hbase', 'db2')
class TestUnparameterizedAggregates(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
# We put the stddev test case in a separate class so that we
# can easily exclude the sqlalchemy scenario, as sqlite doesn't
# support the stddev_pop function and fails ungracefully with
# OperationalError when it is used. However we still want to
# test the corresponding functionality in the mongo driver.
# For hbase & db2, the skip on NotImplementedError logic works
# in the usual way.
PATH = '/meters/instance/statistics'
def setUp(self):
super(TestUnparameterizedAggregates, self).setUp()
test_sample_data = (
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
'source': 'source'},
{'volume': 2, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
'source': 'source'},
{'volume': 1, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11),
'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 2, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 2, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 5, 'user': 'user-1', 'project': 'project-1',
'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 4, 'user': 'user-2', 'project': 'project-2',
'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
'source': 'source'},
{'volume': 9, 'user': 'user-3', 'project': 'project-3',
'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59),
'metadata_flavor': 'm1.large', 'metadata_event': 'event-3',
'source': 'source'},
)
for test_sample in test_sample_data:
c = sample.Sample(
'instance',
sample.TYPE_GAUGE,
unit='instance',
volume=test_sample['volume'],
user_id=test_sample['user'],
project_id=test_sample['project'],
resource_id=test_sample['resource'],
timestamp=datetime.datetime(*test_sample['timestamp']),
resource_metadata={'flavor': test_sample['metadata_flavor'],
'event': test_sample['metadata_event'], },
source=test_sample['source'],
)
msg = utils.meter_message_from_counter(
c, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
def test_per_tenant_selectable_unparameterized_aggregate(self):
agg_args = {'aggregate.func': 'stddev'}
data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
groupby_keys_set = set(x for sub_dict in data
for x in sub_dict['groupby'].keys())
groupby_vals_set = set(x for sub_dict in data
for x in sub_dict['groupby'].values())
self.assertEqual(set(['project_id']), groupby_keys_set)
projects = ['project-1', 'project-2', 'project-3']
self.assertEqual(set(projects), groupby_vals_set)
aggregate = 'stddev'
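        # population stddev of the per-project volumes from setUp:
        # project-1 [2, 2, 5]: sqrt(((2-3)**2 + (2-3)**2 + (5-3)**2) / 3) = sqrt(2) ~= 1.4142
        # project-2 [2, 1, 2, 4]: mean 2.25, sqrt(4.75 / 4) ~= 1.0897
        # project-3 [9]: a single sample, so 0.0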
expected_values = [1.4142, 1.0897, 0.0]
standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
for r in data:
grp = r['groupby']
for project in projects:
if grp == {'project_id': project}:
expected = expected_values[projects.index(project)]
self.assertEqual('instance', r['unit'])
self.assertNotIn(aggregate, r)
self.assertIn('aggregate', r)
self.assertIn(aggregate, r['aggregate'])
self.assertAlmostEqual(r['aggregate'][aggregate],
expected,
places=4)
for a in standard_aggregates:
self.assertNotIn(a, r)
|
jxs/servo | refs/heads/master | tests/wpt/css-tests/css-fonts-3_dev/xhtml1print/reference/support/fonts/makegsubfonts.py | 1616 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
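# test glyphs are mapped starting at U+E000, the first codepoint of the
# Unicode Private Use Area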
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
    mapping.append(tag)
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
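    # 0xFFFF means "no required feature" per the OpenType spec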
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
        # subtable: a format 2 single substitution that swaps each tag's
        # .pass and .fail glyphs when the feature is applied
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                    raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
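    # 0xFFFF means "no required feature" per the OpenType spec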
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
        # subtable: an alternate substitution; for each tag only the
        # matching alternate index maps a glyph to the PASS outline
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
|
Bernardinhouessou/Projets_Autres | refs/heads/master | Python-Projects/Scripts/Algorithms-master/sorting and basics/binary_search.py | 2 | from sorting import mergesort
def search(arr, item):
"""Performs binary search on an array
with the given item and returns True or
False.
>>> search([5, 4, 1, 6, 2, 3, 9, 7], 2)
True
>>> search([5, 4, 1, 6, 2, 3, 9, 7], 8)
False
"""
arr = mergesort(arr)
first = 0
last = len(arr) - 1
found = False
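    # halve the inclusive window [first, last] until the item is found
    # or the window becomes empty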
while first <= last and not found:
midpoint = (first + last) // 2
if arr[midpoint] == item:
found = True
else:
if item < arr[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return found
print search([5, 4, 1, 6, 2, 3, 9, 7], 2)
print search([5, 4, 1, 6, 2, 3, 9, 7], 8)
|
davechallis/gensim | refs/heads/develop | gensim/test/test_ldamodel.py | 8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
import numbers
import six
import numpy
import scipy.linalg
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import ldamodel, ldamulticore
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
class TestLdaModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamodel.LdaModel
self.model = self.class_(corpus, id2word=dictionary, num_topics=2, passes=100)
def testTransform(self):
passed = False
# sometimes, LDA training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
# better random initialization
        for i in range(25):  # restart at most 25 times
# create the transformation model
model = self.class_(id2word=dictionary, num_topics=2, passes=100)
model.update(self.corpus)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.13, 0.87]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-1) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testAlphaAuto(self):
model1 = self.class_(corpus, id2word=dictionary, alpha='symmetric', passes=10)
modelauto = self.class_(corpus, id2word=dictionary, alpha='auto', passes=10)
# did we learn something?
self.assertFalse(all(numpy.equal(model1.alpha, modelauto.alpha)))
def testAlpha(self):
kwargs = dict(
id2word=dictionary,
num_topics=2,
alpha=None
)
expected_shape = (2,)
# should not raise anything
self.class_(**kwargs)
kwargs['alpha'] = 'symmetric'
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(all(model.alpha == numpy.array([0.5, 0.5])))
kwargs['alpha'] = 'asymmetric'
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(numpy.allclose(model.alpha, [0.630602, 0.369398]))
kwargs['alpha'] = 0.3
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(all(model.alpha == numpy.array([0.3, 0.3])))
kwargs['alpha'] = 3
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(all(model.alpha == numpy.array([3, 3])))
kwargs['alpha'] = [0.3, 0.3]
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(all(model.alpha == numpy.array([0.3, 0.3])))
kwargs['alpha'] = numpy.array([0.3, 0.3])
model = self.class_(**kwargs)
self.assertEqual(model.alpha.shape, expected_shape)
self.assertTrue(all(model.alpha == numpy.array([0.3, 0.3])))
# all should raise an exception for being wrong shape
kwargs['alpha'] = [0.3, 0.3, 0.3]
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['alpha'] = [[0.3], [0.3]]
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['alpha'] = [0.3]
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['alpha'] = "gensim is cool"
self.assertRaises(ValueError, self.class_, **kwargs)
def testEtaAuto(self):
model1 = self.class_(corpus, id2word=dictionary, eta='symmetric', passes=10)
modelauto = self.class_(corpus, id2word=dictionary, eta='auto', passes=10)
# did we learn something?
self.assertFalse(all(numpy.equal(model1.eta, modelauto.eta)))
def testEta(self):
kwargs = dict(
id2word=dictionary,
num_topics=2,
eta=None
)
expected_shape = (2, 1)
# should not raise anything
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.5], [0.5]])))
kwargs['eta'] = 'symmetric'
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.5], [0.5]])))
kwargs['eta'] = 'asymmetric'
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(numpy.allclose(model.eta, [[0.630602], [0.369398]]))
kwargs['eta'] = 0.3
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.3], [0.3]])))
kwargs['eta'] = 3
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[3], [3]])))
kwargs['eta'] = [[0.3], [0.3]]
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.3], [0.3]])))
kwargs['eta'] = [0.3, 0.3]
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.3], [0.3]])))
kwargs['eta'] = numpy.array([0.3, 0.3])
model = self.class_(**kwargs)
self.assertEqual(model.eta.shape, expected_shape)
self.assertTrue(all(model.eta == numpy.array([[0.3], [0.3]])))
# should be ok with num_topics x num_terms
testeta = numpy.array([[0.5] * len(dictionary)] * 2)
kwargs['eta'] = testeta
self.class_(**kwargs)
# all should raise an exception for being wrong shape
kwargs['eta'] = testeta.reshape(tuple(reversed(testeta.shape)))
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['eta'] = [0.3, 0.3, 0.3]
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['eta'] = [0.3]
self.assertRaises(AssertionError, self.class_, **kwargs)
kwargs['eta'] = "gensim is cool"
self.assertRaises(ValueError, self.class_, **kwargs)
def testTopTopics(self):
top_topics = self.model.top_topics(self.corpus)
for topic, score in top_topics:
self.assertTrue(isinstance(topic, list))
self.assertTrue(isinstance(score, float))
for v, k in topic:
self.assertTrue(isinstance(k, six.string_types))
self.assertTrue(isinstance(v, float))
def testGetTopicTerms(self):
topic_terms = self.model.get_topic_terms(1)
for k, v in topic_terms:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(isinstance(v, float))
def testShowTopic(self):
topic = self.model.show_topic(1)
for k, v in topic:
self.assertTrue(isinstance(k, six.string_types))
self.assertTrue(isinstance(v, float))
def testShowTopics(self):
topics = self.model.show_topics(formatted=False)
for topic_no, topic in topics:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(isinstance(topic, list))
for k, v in topic:
self.assertTrue(isinstance(k, six.string_types))
self.assertTrue(isinstance(v, float))
def testGetDocumentTopics(self):
doc_topics = self.model.get_document_topics(self.corpus)
for topic in doc_topics:
self.assertTrue(isinstance(topic, list))
for k, v in topic:
self.assertTrue(isinstance(k, int))
self.assertTrue(isinstance(v, float))
doc_topics = self.model.get_document_topics(self.corpus[0])
for k, v in doc_topics:
self.assertTrue(isinstance(k, int))
self.assertTrue(isinstance(v, float))
def testPasses(self):
# long message includes the original error message with a custom one
self.longMessage = True
# construct what we expect when passes aren't involved
test_rhots = list()
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2)
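# rhot is the online-LDA learning rate from Hoffman et al.'s online
# variational Bayes: rhot = (offset + num_updates / chunksize) ** (-decay),
# which is exactly what the lambda below evaluates from the model's counters.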
final_rhot = lambda: pow(model.offset + (1 * model.num_updates) / model.chunksize, -model.decay)
# generate 5 updates to test rhot on
for x in range(5):
model.update(self.corpus)
test_rhots.append(final_rhot())
for passes in [1, 5, 10, 50, 100]:
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2, passes=passes)
self.assertEqual(final_rhot(), 1.0)
# make sure the rhot matches the test after each update
for test_rhot in test_rhots:
model.update(self.corpus)
msg = ", ".join(map(str, [passes, model.num_updates, model.state.numdocs]))
self.assertAlmostEqual(final_rhot(), test_rhot, msg=msg)
self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))
# def testTopicSeeding(self):
# for topic in range(2):
# passed = False
# for i in range(5): # restart at most this many times, to mitigate LDA randomness
# # try seeding it both ways round, check you get the same
# # topics out but with which way round they are depending
# # on the way round they're seeded
# eta = numpy.ones((2, len(dictionary))) * 0.5
# system = dictionary.token2id[u'system']
# trees = dictionary.token2id[u'trees']
# # aggressively seed the word 'system', in one of the
# # two topics, 10 times higher than the other words
# eta[topic, system] *= 10.0
# model = self.class_(id2word=dictionary, num_topics=2, passes=200, eta=eta)
# model.update(self.corpus)
# topics = [dict((word, p) for p, word in model.show_topic(j, topn=None)) for j in range(2)]
# # check that the word 'system' in the topic we seeded got a high weight,
# # and the word 'trees' (the main word in the other topic) a low weight --
# # and vice versa for the other topic (which we didn't seed with 'system')
# passed = (
# (topics[topic][u'system'] > topics[topic][u'trees'])
# and
# (topics[1 - topic][u'system'] < topics[1 - topic][u'trees'])
# )
# if passed:
# break
# logging.warning("LDA failed to converge on attempt %i (got %s)", i, topics)
# self.assertTrue(passed)
def testPersistence(self):
fname = testfile()
model = self.model
model.save(fname)
model2 = self.class_.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceIgnore(self):
fname = testfile()
model = ldamodel.LdaModel(self.corpus, num_topics=2)
model.save(fname, ignore='id2word')
model2 = ldamodel.LdaModel.load(fname)
self.assertTrue(model2.id2word is None)
model.save(fname, ignore=['id2word'])
model2 = ldamodel.LdaModel.load(fname)
self.assertTrue(model2.id2word is None)
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = self.model
model.save(fname)
model2 = self.class_.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = self.model
# simulate storing large arrays separately
model.save(testfile(), sep_limit=0)
# test loading the large model arrays with mmap
model2 = self.class_.load(testfile(), mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.expElogbeta, numpy.memmap))
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = self.model
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, self.class_.load, fname, mmap='r')
#endclass TestLdaModel
class TestLdaMulticore(TestLdaModel):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamulticore.LdaMulticore
self.model = self.class_(corpus, id2word=dictionary, num_topics=2, passes=100)
# override LdaModel because multicore does not allow alpha=auto
def testAlphaAuto(self):
self.assertRaises(RuntimeError, self.class_, alpha='auto')
#endclass TestLdaMulticore
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
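# Usage sketch (the file name below is an assumption): run only the
# single-process LDA tests through unittest's CLI, e.g.
#   python -m unittest test_models.TestLdaModel -v
# TestLdaMulticore reuses the same test methods via inheritance.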
|
pathawks/pygments | refs/heads/master | pygments/formatters/terminal.py | 363 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty() and \
sys.version_info < (3,):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
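# token types are hierarchical; if this exact type has no colour assigned,
# fall back to its parent type (ttype[:-1]) until a mapping is found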
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
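# Usage sketch (not part of the original module): render Python source for a
# dark-background terminal; `highlight` and `PythonLexer` are standard
# pygments entry points.
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   print(highlight('print 1', PythonLexer(), TerminalFormatter(bg='dark')))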
|
cad-lab/blog | refs/heads/gh-pages | plugin/liquid_tags/gram.py | 271 | """
Instagram Image Tag
-------------------
By `Tom Spalding <https://github.com/digitalvapor>`_
You can see a working example at `antivapor.net/instagram-tag.html <http://antivapor.net/instagram-tag.html>`_.
Based on `Liquid Image Tag <https://github.com/getpelican/pelican-plugins/blob/master/liquid_tags/img.py>`_ by `Jake Vanderplas <https://github.com/jakevdp>`_.
Optional Todo:
* Query JSON to automatically include descriptions.
http://api.instagram.com/oembed?url=http://instagr.am/p/olw8jXiz1_/
and option to add wrapping anchor link to original http://instagram.com/p/olw8jXiz1_
* Default to size m
http://instagr.am/p/olw8jXiz1_/media/?size=t
http://instagr.am/p/olw8jXiz1_/media
* Provide examples of use with [Better Figures and Images](https://github.com/getpelican/pelican-plugins/tree/master/better_figures_and_images).
Syntax
------
{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}
where size is t, m, or l, and it defaults to m. see http://instagram.com/developer/embedding.
Examples
--------
{% gram pFG7naIZkr t %}
{% gram pFJE11IZnx %}
{% gram pFI0CAIZna l 400 figure 'pretty turkey tail fungus' %}
{% gram rOru21oZpe l 450 test_class instagram 'warehouse window title' 'alt text' %}
Output
------
<img src="http://photos-c.ak.instagram.com/hphotos-ak-xaf1/t51.2885-15/917172_604907902963826_254280879_n.jpg" width="450" title="warehouse window title" alt="alt text" class="test_class instagram">
"""
import re
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% gram shortcode [size] [width] [class name(s)] [title text | "title text" ["alt text"]] %}'
# Regular expression for full syntax
# ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>\S*))?(?P<title>\s+.+)?""")
ReGram = re.compile("""(?P<shortcode>\S+)(?:\s+(?P<size>[tml]?))?(?:\s+(?P<width>\d*))?(?:\s+(?P<class>[^']*))?(?P<title>.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
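# ReTitleAlt turns a markup tail like '"window title" "alt text"' into
# {'title': 'window title', 'alt': 'alt text'}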
@LiquidTags.register('gram')
def gram(preprocessor, tag, markup):
attrs = None
# Parse the markup string
match = ReGram.search(markup)
if match:
attrs = dict([(key, val.strip())
for (key, val) in match.groupdict().items() if val])
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# Construct URI
#print(attrs)
shortcode = attrs['shortcode']
url = 'http://instagr.am/p/'+shortcode+'/media/'
del attrs['shortcode']
if 'size' in attrs:
size = '?size={0}'.format(attrs['size'])
url = url+size
del attrs['size']
r = urlopen(url)
if r.getcode() == 404:
raise ValueError("%s isn't a photo." % shortcode)
gram_url = r.geturl()
# Check if alt text is present -- if so, split it from title
if 'title' in attrs:
match = ReTitleAlt.search(attrs['title'])
if match:
attrs.update(match.groupdict())
if not attrs.get('alt'):
attrs['alt'] = attrs['title']
#print('updated dict: '+repr(attrs))
# Return the formatted text
return '<img src="{0}"{1}>'.format(gram_url,' '.join(' {0}="{1}"'.format(key,val) for (key,val) in attrs.items()))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
|
nburn42/tensorflow | refs/heads/master | tensorflow/contrib/tensor_forest/python/kernel_tests/scatter_add_ndim_op_test.py | 114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.scatter_add_ndim_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class ScatterAddNdimTest(test_util.TensorFlowTestCase):
def test1dim(self):
input_data = variables.Variable(
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.])
indices = [[1], [10]]
updates = [100., 200.]
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(
[1., 102., 3., 4., 5., 6., 7., 8., 9., 10., 211., 12.],
input_data.eval())
def test3dim(self):
input_data = variables.Variable([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100., 200.]
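# scatter_add_ndim adds each update at its full index: +100. at [0, 0, 1]
# and +200. at [1, 1, 2], as the assertion below verifies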
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[1., 102., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 212.]]], input_data.eval())
def testNoUpdates(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.Variable(init_val)
indices = []
updates = []
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testBadInput(self):
init_val = [[[1., 2., 3.], [4., 5., 6.]], [[7., 8., 9.], [10., 11., 12.]]]
input_data = variables.Variable(init_val)
indices = [[0, 0, 1], [1, 1, 2]]
updates = [100.]
with self.test_session():
variables.global_variables_initializer().run()
with self.assertRaisesOpError(
'Number of updates should be same as number of indices.'):
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual(init_val, input_data.eval())
def testIncompleteIndices(self):
input_data = variables.Variable([[[1., 2., 3.], [4., 5., 6.]],
[[7., 8., 9.], [10., 11., 12.]]])
indices = [[0, 0], [1, 1]]
updates = [[100., 200., 300.], [400., 500., 600.]]
with self.test_session():
variables.global_variables_initializer().run()
tensor_forest_ops.scatter_add_ndim(input_data, indices, updates).run()
self.assertAllEqual([[[101., 202., 303.], [4., 5., 6.]],
[[7., 8., 9.], [410., 511., 612.]]],
input_data.eval())
if __name__ == '__main__':
googletest.main()
|
willmarkley/valumodel.com | refs/heads/master | scripts/dcf.py | 1 | #!/usr/bin/python
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')
from calc_dcf import calc_dcf
def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
assumptions = {}
try:
assumptions['Tax Rate'] = float(tax_rate)/100.0
assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out)/100.0
assumptions['SGA % of sales'] = float(sga_of_sales)/100.0
assumptions['D&A % of sales'] = float(da_of_sales)/100.0
assumptions['CAPEX % of sales'] = float(capex_of_sales)/100.0
assumptions['NWC % of sales'] = float(nwc_of_sales)/100.0
assumptions['Levered Beta'] = float(levered_beta)
assumptions['Current Yield'] = float(current_yield)/100.0
assumptions['Exit Multiple'] = float(exit_multiple)
except ValueError:
return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'
ticker = ticker.split(' ')[0]
if not ticker.isalnum():
return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'
return calc_dcf(assumptions, ticker.upper())
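# Illustrative call (values are made up; `req` appears to be an unused
# mod_python request argument, so None works for direct calls):
#   create_dcf(None, tax_rate='35', growth_rate_1_year_out='5',
#              sga_of_sales='20', da_of_sales='4', capex_of_sales='5',
#              nwc_of_sales='10', levered_beta='1.1', current_yield='4',
#              exit_multiple='8', ticker='ibm')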
|
jjmleiro/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/sessions/exceptions.py | 931 | from django.core.exceptions import SuspiciousOperation
class InvalidSessionKey(SuspiciousOperation):
"""Invalid characters in session key"""
pass
class SuspiciousSession(SuspiciousOperation):
"""The session may be tampered with"""
pass
|
gabbayo/git-repo | refs/heads/master | editor.py | 2 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import sys
import subprocess
import tempfile
from error import EditorError
import platform_utils
class Editor(object):
"""Manages the user's preferred text editor."""
_editor = None
globalConfig = None
@classmethod
def _GetEditor(cls):
if cls._editor is None:
cls._editor = cls._SelectEditor()
return cls._editor
@classmethod
def _SelectEditor(cls):
e = os.getenv('GIT_EDITOR')
if e:
return e
if cls.globalConfig:
e = cls.globalConfig.GetString('core.editor')
if e:
return e
e = os.getenv('VISUAL')
if e:
return e
e = os.getenv('EDITOR')
if e:
return e
if os.getenv('TERM') == 'dumb':
print(
"""No editor specified in GIT_EDITOR, core.editor, VISUAL or EDITOR.
Tried to fall back to vi but terminal is dumb. Please configure at
least one of these before using this command.""", file=sys.stderr)
sys.exit(1)
return 'vi'
@classmethod
def EditString(cls, data):
"""Opens an editor to edit the given content.
Args:
data : the text to edit
Returns:
new value of edited text; None if editing did not succeed
"""
editor = cls._GetEditor()
if editor == ':':
return data
fd, path = tempfile.mkstemp()
try:
os.write(fd, data)
os.close(fd)
fd = None
if platform_utils.isWindows():
# Split on spaces, respecting quoted strings
import shlex
args = shlex.split(editor)
shell = False
elif re.compile("^.*[$ \t'].*$").match(editor):
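# the editor command contains shell metacharacters or quoting, so run it
# through sh and let the shell do the word splitting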
args = [editor + ' "$@"', 'sh']
shell = True
else:
args = [editor]
shell = False
args.append(path)
try:
rc = subprocess.Popen(args, shell=shell).wait()
except OSError as e:
raise EditorError('editor failed, %s: %s %s'
% (str(e), editor, path))
if rc != 0:
raise EditorError('editor failed with exit status %d: %s %s'
% (rc, editor, path))
fd2 = open(path)
try:
return fd2.read()
finally:
fd2.close()
finally:
if fd:
os.close(fd)
platform_utils.remove(path)
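# Usage sketch (assumes repo's config objects; EditString is the entry point
# used by repo commands):
#   Editor.globalConfig = global_config # a GitConfig-like object, or None
#   new_text = Editor.EditString('original text\n')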
|
philsch/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_port_facts.py | 29 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: os_port_facts
short_description: Retrieve facts about ports within OpenStack.
version_added: "2.1"
author: "David Shrewsbury (@Shrews)"
description:
- Retrieve facts about ports from OpenStack.
notes:
- Facts are placed in the C(openstack_ports) variable.
requirements:
- "python >= 2.6"
- "shade"
options:
port:
description:
- Unique name or ID of a port.
required: false
default: null
filters:
description:
- A dictionary of meta data to use for further filtering. Elements
of this dictionary will be matched against the returned port
dictionaries. Matching is currently limited to strings within
the port dictionary, or strings within nested dictionaries.
required: false
default: null
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all ports
- os_port_facts:
cloud: mycloud
# Gather facts about a single port
- os_port_facts:
cloud: mycloud
port: 6140317d-e676-31e1-8a4a-b1913814a471
# Gather facts about all ports that have device_id set to a specific value
# and with a status of ACTIVE.
- os_port_facts:
cloud: mycloud
filters:
device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
status: ACTIVE
'''
RETURN = '''
openstack_ports:
description: List of port dictionaries. A subset of the dictionary keys
listed below may be returned, depending on your cloud provider.
returned: always, but can be null
type: complex
contains:
admin_state_up:
description: The administrative state of the port, which is
up (true) or down (false).
returned: success
type: boolean
sample: true
allowed_address_pairs:
description: A set of zero or more allowed address pairs. An
address pair consists of an IP address and MAC address.
returned: success
type: list
sample: []
"binding:host_id":
description: The UUID of the host where the port is allocated.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
"binding:profile":
description: A dictionary that enables the application running on
the host to pass and receive VIF port-specific
information to the plug-in.
returned: success
type: dict
sample: {}
"binding:vif_details":
description: A dictionary that enables the application to pass
information about functions that the Networking API
provides.
returned: success
type: dict
sample: {"port_filter": true}
"binding:vif_type":
description: The VIF type for the port.
returned: success
type: dict
sample: "ovs"
"binding:vnic_type":
description: The virtual network interface card (vNIC) type that is
bound to the neutron port.
returned: success
type: string
sample: "normal"
device_id:
description: The UUID of the device that uses this port.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
device_owner:
description: The UUID of the entity that uses this port.
returned: success
type: string
sample: "network:router_interface"
dns_assignment:
description: DNS assignment information.
returned: success
type: list
dns_name:
description: DNS name
returned: success
type: string
sample: ""
extra_dhcp_opts:
description: A set of zero or more extra DHCP option pairs.
An option pair consists of an option value and name.
returned: success
type: list
sample: []
fixed_ips:
description: The IP addresses for the port. Includes the IP address
and UUID of the subnet.
returned: success
type: list
id:
description: The UUID of the port.
returned: success
type: string
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
ip_address:
description: The IP address.
returned: success
type: string
sample: "127.0.0.1"
mac_address:
description: The MAC address.
returned: success
type: string
sample: "00:00:5E:00:53:42"
name:
description: The port name.
returned: success
type: string
sample: "port_name"
network_id:
description: The UUID of the attached network.
returned: success
type: string
sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
port_security_enabled:
description: The port security status. The status is enabled (true) or disabled (false).
returned: success
type: boolean
sample: false
security_groups:
description: The UUIDs of any attached security groups.
returned: success
type: list
status:
description: The port status.
returned: success
type: string
sample: "ACTIVE"
tenant_id:
description: The UUID of the tenant who owns the network.
returned: success
type: string
sample: "51fce036d7984ba6af4f6c849f65ef00"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
port=dict(required=False),
filters=dict(type='dict', required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
port = module.params.pop('port')
filters = module.params.pop('filters')
try:
cloud = shade.openstack_cloud(**module.params)
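# search_ports takes an optional port name/ID plus the metadata filter dict
# and returns a list of matching port dictionaries (possibly empty)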
ports = cloud.search_ports(port, filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_ports=ports))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
GenericStudent/home-assistant | refs/heads/dev | homeassistant/components/yamaha/__init__.py | 36 | """The yamaha component."""
|
errx/django | refs/heads/master | django/contrib/contenttypes/views.py | 74 | from __future__ import unicode_literals
from django import http
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
if not content_type.model_class():
raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
{'ct_id': content_type_id})
obj = content_type.get_object_for_this_type(pk=object_id)
except (ObjectDoesNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
{'ct_name': content_type.name})
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith(('http://', 'https://', '//')):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
opts = obj._meta
# First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = Site.objects.get_current().domain
except Site.DoesNotExist:
pass
else:
# Fall back to the current request's site.
object_domain = RequestSite(request).domain
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = request.scheme
return http.HttpResponseRedirect('%s://%s%s'
% (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
|
titilambert/alignak | refs/heads/develop | alignak/objects/servicedependency.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Guillaume Bour, guillaume@bour.cc
# Arthur Gautier, superbaloo@superbaloo.net
# aviau, alexandre.viau@savoirfairelinux.com
# Nicolas Dupeux, nicolas@dupeux.net
# Gerhard Lausser, gerhard.lausser@consol.de
# Grégory Starck, g.starck@gmail.com
# Alexander Springer, alex.spri@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# Christophe Simon, geektophe@gmail.com
# Jean Gabes, naparuba@gmail.com
# Romain Forlot, rforlot@yahoo.com
# Romain THERRAT, romain42@gmail.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""This module provides Servicedependency and Servicedependencies classes that
implements dependencies between services. Basically used for parsing.
"""
from alignak.property import BoolProp, StringProp, ListProp
from alignak.log import logger
from .item import Item, Items
class Servicedependency(Item):
"""Servicedependency class is a simple implementation of service dependency as
defined in a monitoring context (dependency period, notification_failure_criteria ..)
"""
_id = 0
my_type = "servicedependency"
# F is dep of D
# host_name Host B
# service_description Service D
# dependent_host_name Host C
# dependent_service_description Service F
# execution_failure_criteria o
# notification_failure_criteria w,u
# inherits_parent 1
# dependency_period 24x7
properties = Item.properties.copy()
properties.update({
'dependent_host_name': StringProp(),
'dependent_hostgroup_name': StringProp(default=''),
'dependent_service_description': StringProp(),
'host_name': StringProp(),
'hostgroup_name': StringProp(default=''),
'service_description': StringProp(),
'inherits_parent': BoolProp(default=False),
'execution_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'notification_failure_criteria': ListProp(default=['n'], split_on_coma=True),
'dependency_period': StringProp(default=''),
'explode_hostgroup': BoolProp(default=False)
})
def get_name(self):
"""Get name based on 4 class attributes
Each attribute is substituted by '' if attribute does not exist
:return: dependent_host_name/dependent_service_description..host_name/service_description
:rtype: str
TODO: Clean this function (use format for string)
"""
return getattr(self, 'dependent_host_name', '') + '/'\
+ getattr(self, 'dependent_service_description', '') \
+ '..' + getattr(self, 'host_name', '') + '/' \
+ getattr(self, 'service_description', '')
class Servicedependencies(Items):
"""Servicedependencies manage a list of Servicedependency objects, used for parsing configuration
"""
inner_class = Servicedependency # use for know what is in items
def delete_servicesdep_by_id(self, ids):
"""Delete a list of servicedependency
:param ids: ids list to delete
:type ids: list
:return: None
"""
for s_id in ids:
del self[s_id]
def add_service_dependency(self, dep_host_name, dep_service_description,
par_host_name, par_service_description):
"""Instantiate and add a Servicedependency object to the items dict::
* notification criteria is "u,c,w"
* inherits_parent is True
:param dep_host_name: dependent host name
:type dep_host_name: str
:param dep_service_description: dependent service description
:type dep_service_description: str
:param par_host_name: host name
:type par_host_name: str
:param par_service_description: service description
:type par_service_description: str
:return: None
"""
# We create a "standard" service_dep
prop = {
'dependent_host_name': dep_host_name,
'dependent_service_description': dep_service_description,
'host_name': par_host_name,
'service_description': par_service_description,
'notification_failure_criteria': 'u,c,w',
'inherits_parent': '1',
}
servicedep = Servicedependency(prop)
self.add_item(servicedep)
def explode_hostgroup(self, svc_dep, hostgroups):
"""Explode a service dependency for each member of hostgroup
:param svc_dep: service dependency to explode
:type svc_dep: alignak.objects.servicedependency.Servicedependency
:param hostgroups: used to find hostgroup objects
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return:None
"""
# We will create a service dependency for each host part of the host group
# First get services
snames = [d.strip() for d in svc_dep.service_description.split(',')]
# And dep services
dep_snames = [d.strip() for d in svc_dep.dependent_service_description.split(',')]
# Now for each host into hostgroup we will create a service dependency object
hg_names = [n.strip() for n in svc_dep.hostgroup_name.split(',')]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an unknown hostgroup_name '%s'" % hg_name
self.configuration_errors.append(err)
continue
hnames = []
hnames.extend([m.strip() for m in hostgroup.members])
for hname in hnames:
for dep_sname in dep_snames:
for sname in snames:
new_sd = svc_dep.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd)
def explode(self, hostgroups):
"""Explode all service dependency for each member of hostgroups
Each member of dependent hostgroup or hostgroup in dependency have to get a copy of
service dependencies (quite complex to parse)
:param hostgroups: used to look for hostgroup
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# The "old" services will be removed. All services with
# more than one host or a host group will be in it
srvdep_to_remove = []
# Then for every host create a copy of the service with just the host
# because we are adding services, we can't just loop in it
servicedeps = self.items.keys()
for s_id in servicedeps:
servicedep = self.items[s_id]
# Have we to explode the hostgroup into many service?
if bool(getattr(servicedep, 'explode_hostgroup', 0)) and \
hasattr(servicedep, 'hostgroup_name'):
self.explode_hostgroup(servicedep, hostgroups)
srvdep_to_remove.append(s_id)
continue
# Get the list of all FATHER hosts and service deps
hnames = []
if hasattr(servicedep, 'hostgroup_name'):
hg_names = [n.strip() for n in servicedep.hostgroup_name.split(',')]
hg_names = [hg_name.strip() for hg_name in hg_names]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an" \
" unknown hostgroup_name '%s'" % hg_name
hostgroup.configuration_errors.append(err)
continue
hnames.extend([m.strip() for m in hostgroup.members])
if not hasattr(servicedep, 'host_name'):
servicedep.host_name = ''
if servicedep.host_name != '':
hnames.extend([n.strip() for n in servicedep.host_name.split(',')])
snames = [d.strip() for d in servicedep.service_description.split(',')]
couples = []
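# enumerate every (parent host, parent service) pair this definition covers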
for hname in hnames:
for sname in snames:
couples.append((hname.strip(), sname.strip()))
if not hasattr(servicedep, 'dependent_hostgroup_name') \
and hasattr(servicedep, 'hostgroup_name'):
servicedep.dependent_hostgroup_name = servicedep.hostgroup_name
# Now the dep part (the sons)
dep_hnames = []
if hasattr(servicedep, 'dependent_hostgroup_name'):
hg_names = [n.strip() for n in servicedep.dependent_hostgroup_name.split(',')]
hg_names = [hg_name.strip() for hg_name in hg_names]
for hg_name in hg_names:
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is None:
err = "ERROR: the servicedependecy got an " \
"unknown dependent_hostgroup_name '%s'" % hg_name
hostgroup.configuration_errors.append(err)
continue
dep_hnames.extend([m.strip() for m in hostgroup.members])
if not hasattr(servicedep, 'dependent_host_name'):
servicedep.dependent_host_name = getattr(servicedep, 'host_name', '')
if servicedep.dependent_host_name != '':
dep_hnames.extend([n.strip() for n in servicedep.dependent_host_name.split(',')])
dep_snames = [d.strip() for d in servicedep.dependent_service_description.split(',')]
dep_couples = []
for dep_hname in dep_hnames:
for dep_sname in dep_snames:
dep_couples.append((dep_hname.strip(), dep_sname.strip()))
# Create the new service deps from all of this.
for (dep_hname, dep_sname) in dep_couples: # the sons, like HTTP
for (hname, sname) in couples: # the fathers, like MySQL
new_sd = servicedep.copy()
new_sd.host_name = hname
new_sd.service_description = sname
new_sd.dependent_host_name = dep_hname
new_sd.dependent_service_description = dep_sname
self.add_item(new_sd)
# Ok so we can remove the old one
srvdep_to_remove.append(s_id)
self.delete_servicesdep_by_id(srvdep_to_remove)
def linkify(self, hosts, services, timeperiods):
"""Create link between objects::
* servicedependency -> host
* servicedependency -> service
* servicedependency -> timeperiods
:param hosts: hosts to link
:type hosts: alignak.objects.host.Hosts
:param services: services to link
:type services: alignak.objects.service.Services
:param timeperiods: timeperiods to link
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
self.linkify_sd_by_s(hosts, services)
self.linkify_sd_by_tp(timeperiods)
self.linkify_s_by_sd()
def linkify_sd_by_s(self, hosts, services):
"""Replace dependent_service_description and service_description
in service dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:param services: service list to look for a specific one
:type services: alignak.objects.service.Services
:return: None
"""
to_del = []
errors = self.configuration_errors
warns = self.configuration_warnings
for servicedep in self:
try:
s_name = servicedep.dependent_service_description
hst_name = servicedep.dependent_host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.dependent_service_description = serv
s_name = servicedep.service_description
hst_name = servicedep.host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.service_description = serv
except AttributeError as err:
logger.error("[servicedependency] fail to linkify by service %s: %s",
servicedep, err)
to_del.append(servicedep)
for servicedep in to_del:
self.remove_item(servicedep)
def linkify_sd_by_tp(self, timeperiods):
"""Replace dependency_period by a real object in service dependency
:param timeperiods: list of timeperiod, used to look for a specific one
:type timeperiods: alignak.objects.timeperiod.Timeperiods
:return: None
"""
for servicedep in self:
try:
tp_name = servicedep.dependency_period
timeperiod = timeperiods.find_by_name(tp_name)
servicedep.dependency_period = timeperiod
except AttributeError as exp:
logger.error("[servicedependency] fail to linkify by timeperiods: %s", exp)
def linkify_s_by_sd(self):
"""Add dependency in service objects
:return: None
"""
for servicedep in self:
dsc = servicedep.dependent_service_description
sdval = servicedep.service_description
if dsc is not None and sdval is not None:
dep_period = getattr(servicedep, 'dependency_period', None)
dsc.add_service_act_dependency(sdval, servicedep.notification_failure_criteria,
dep_period, servicedep.inherits_parent)
dsc.add_service_chk_dependency(sdval, servicedep.execution_failure_criteria,
dep_period, servicedep.inherits_parent)
def is_correct(self):
"""Check if this host configuration is correct ::
* All required parameter are specified
* Go through all configuration warnings and errors that could have been raised earlier
:return: True if the configuration is correct, otherwise False
:rtype: bool
"""
valid = super(Servicedependencies, self).is_correct()
return valid and self.no_loop_in_parents("service_description",
"dependent_service_description")
|
brson/servo | refs/heads/master | src/components/script/dom/bindings/codegen/parser/tests/test_union.py | 134 | import WebIDL
import itertools
import string
# We'd like to use itertools.chain but it's 2.6 or higher.
def chain(*iterables):
# chain('ABC', 'DEF') --> A B C D E F
for it in iterables:
for element in it:
yield element
# We'd like to use itertools.combinations but it's 2.6 or higher.
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
# We'd like to use itertools.combinations_with_replacement but it's 2.7 or
# higher.
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def WebIDLTest(parser, harness):
types = ["float",
"double",
"short",
"unsigned short",
"long",
"unsigned long",
"long long",
"unsigned long long",
"boolean",
"byte",
"octet",
"DOMString",
#"sequence<float>",
"object",
"ArrayBuffer",
#"Date",
"TestInterface1",
"TestInterface2"]
testPre = """
interface TestInterface1 {
};
interface TestInterface2 {
};
"""
interface = testPre + """
interface PrepareForTest {
"""
for (i, type) in enumerate(types):
interface += string.Template("""
readonly attribute ${type} attr${i};
""").substitute(i=i, type=type)
interface += """
};
"""
parser.parse(interface)
results = parser.finish()
iface = results[2]
parser = parser.reset()
def typesAreDistinguishable(t):
return all(u[0].isDistinguishableFrom(u[1]) for u in combinations(t, 2))
def typesAreNotDistinguishable(t):
return any(not u[0].isDistinguishableFrom(u[1]) for u in combinations(t, 2))
def unionTypeName(t):
if len(t) > 2:
t[0:2] = [unionTypeName(t[0:2])]
return "(" + " or ".join(t) + ")"
# typeCombinations is an iterable of tuples containing the name of the type
# as a string and the parsed IDL type.
def unionTypes(typeCombinations, predicate):
for c in typeCombinations:
if predicate(t[1] for t in c):
yield unionTypeName([t[0] for t in c])
# We limit invalid union types with a union member type to the subset of 3
# types with one invalid combination.
# typeCombinations is an iterable of tuples containing the name of the type
# as a string and the parsed IDL type.
def invalidUnionWithUnion(typeCombinations):
for c in typeCombinations:
if (typesAreNotDistinguishable((c[0][1], c[1][1])) and
typesAreDistinguishable((c[1][1], c[2][1])) and
typesAreDistinguishable((c[0][1], c[2][1]))):
yield unionTypeName([t[0] for t in c])
# Create a list of tuples containing the name of the type as a string and
# the parsed IDL type.
types = zip(types, (a.type for a in iface.members))
validUnionTypes = chain(unionTypes(combinations(types, 2), typesAreDistinguishable),
unionTypes(combinations(types, 3), typesAreDistinguishable))
invalidUnionTypes = chain(unionTypes(combinations_with_replacement(types, 2), typesAreNotDistinguishable),
invalidUnionWithUnion(combinations(types, 3)))
interface = testPre + """
interface TestUnion {
"""
for (i, type) in enumerate(validUnionTypes):
interface += string.Template("""
void method${i}(${type} arg);
${type} returnMethod${i}();
attribute ${type} attr${i};
void arrayMethod${i}(${type}[] arg);
${type}[] arrayReturnMethod${i}();
attribute ${type}[] arrayAttr${i};
void optionalMethod${i}(${type}? arg);
""").substitute(i=i, type=type)
interface += """
};
"""
parser.parse(interface)
results = parser.finish()
parser = parser.reset()
for invalid in invalidUnionTypes:
interface = testPre + string.Template("""
interface TestUnion {
void method(${type} arg);
};
""").substitute(type=invalid)
threw = False
try:
parser.parse(interface)
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
|
40223117cda/cda | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/xml/etree/__init__.py | 1200 | # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
|
ryokbys/nap | refs/heads/master | nappy/vasp/potcar.py | 1 | #!/bin/env python
from __future__ import print_function
import re
def read_POTCAR(fname='POTCAR'):
species=[]
valence=[]
encut=[]
isp=0
with open(fname,'r') as f:
lines= f.readlines()
for iline in range(len(lines)):
line= lines[iline]
if (re.match(r'^\s+US ',line) or
re.match(r'^\s+PAW_(PBE|GGA) ',line) or re.match(r'^\s+PAW ',line)) and \
'radial sets' not in line:
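# a new pseudopotential block starts here; record the species symbol now and
# read its valence from the following line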
isp = isp + 1
data= line.split()
species.append(data[1]) # species name
valence.append(float(lines[iline+1].rstrip()))
if 'ENMAX' in line:
data= line.split()
encut.append(float(data[2].rstrip(';')))
potcar={}
potcar['num_species']= isp
potcar['species']= species
potcar['valence']= valence
potcar['encut']= encut
return potcar
if __name__ == '__main__':
potcar=read_POTCAR()
print(potcar)
|
athompso/ansible-modules-core | refs/heads/devel | cloud/google/gce_lb.py | 130 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
version_added: "1.5"
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
- the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
required: false
default: null
members:
description:
- a list of zone/nodename pairs, e.g ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
httphealthcheck_name = dict(),
httphealthcheck_port = dict(default=80),
httphealthcheck_path = dict(default='/'),
httphealthcheck_interval = dict(default=5),
httphealthcheck_timeout = dict(default=5),
httphealthcheck_unhealthy_count = dict(default=2),
httphealthcheck_healthy_count = dict(default=2),
httphealthcheck_host = dict(),
name = dict(),
protocol = dict(default='tcp'),
region = dict(),
external_ip = dict(),
port_range = dict(),
members = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
gce = gce_connect(module)
httphealthcheck_name = module.params.get('httphealthcheck_name')
httphealthcheck_port = module.params.get('httphealthcheck_port')
httphealthcheck_path = module.params.get('httphealthcheck_path')
httphealthcheck_interval = module.params.get('httphealthcheck_interval')
httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
httphealthcheck_unhealthy_count = \
module.params.get('httphealthcheck_unhealthy_count')
httphealthcheck_healthy_count = \
module.params.get('httphealthcheck_healthy_count')
httphealthcheck_host = module.params.get('httphealthcheck_host')
name = module.params.get('name')
protocol = module.params.get('protocol')
region = module.params.get('region')
external_ip = module.params.get('external_ip')
port_range = module.params.get('port_range')
members = module.params.get('members')
state = module.params.get('state')
try:
gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
gcelb.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'name': name, 'state': state}
if not name and not httphealthcheck_name:
module.fail_json(msg='Nothing to do, please specify a "name" ' + \
'or "httphealthcheck_name" parameter', changed=False)
if state in ['active', 'present']:
# first, create the httphealthcheck if requested
hc = None
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
host=httphealthcheck_host, path=httphealthcheck_path,
port=httphealthcheck_port,
interval=httphealthcheck_interval,
timeout=httphealthcheck_timeout,
unhealthy_threshold=httphealthcheck_unhealthy_count,
healthy_threshold=httphealthcheck_healthy_count)
changed = True
except ResourceExistsError:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if hc is not None:
json_output['httphealthcheck_host'] = hc.extra['host']
json_output['httphealthcheck_path'] = hc.path
json_output['httphealthcheck_port'] = hc.port
json_output['httphealthcheck_interval'] = hc.interval
json_output['httphealthcheck_timeout'] = hc.timeout
json_output['httphealthcheck_unhealthy_count'] = \
hc.unhealthy_threshold
json_output['httphealthcheck_healthy_count'] = \
hc.healthy_threshold
# create the forwarding rule (and target pool under the hood)
lb = None
if name:
if not region:
module.fail_json(msg='Missing required region name',
changed=False)
nodes = []
output_nodes = []
json_output['name'] = name
# members is a python list of 'zone/inst' strings
if members:
for node in members:
try:
zone, node_name = node.split('/')
nodes.append(gce.ex_get_node(node_name, zone))
output_nodes.append(node)
                    except Exception:
# skip nodes that are badly formatted or don't exist
pass
try:
if hc is not None:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_healthchecks=[hc],
ex_address=external_ip)
else:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_address=external_ip)
changed = True
except ResourceExistsError:
lb = gcelb.get_balancer(name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if lb is not None:
json_output['members'] = output_nodes
json_output['protocol'] = protocol
json_output['region'] = region
json_output['external_ip'] = lb.ip
json_output['port_range'] = lb.port
hc_names = []
if 'healthchecks' in lb.extra:
for hc in lb.extra['healthchecks']:
hc_names.append(hc.name)
json_output['httphealthchecks'] = hc_names
if state in ['absent', 'deleted']:
# first, delete the load balancer (forwarding rule and target pool)
# if specified.
if name:
json_output['name'] = name
try:
lb = gcelb.get_balancer(name)
gcelb.destroy_balancer(lb)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# destroy the health check if specified
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
gce.ex_destroy_healthcheck(hc)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
|
bowen0701/algorithms_data_structures | refs/heads/master | lc0013_roman_to_integer.py | 1 | """Leetcode 13. Roman to Integer
Easy
URL: https://leetcode.com/problems/roman-to-integer/
Roman numerals are represented by seven different symbols:
I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numerals,
just two ones added together.
Twelve is written as XII, which is simply X + II.
The number twenty-seven is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right.
However, the numeral for four is not IIII.
Instead, the number four is written as IV.
Because the one is before the five, we subtract it, making four.
The same principle applies to the number nine, which is written as IX.
There are six instances where subtraction is used:
- I can be placed before V (5) and X (10) to make 4 and 9.
- X can be placed before L (50) and C (100) to make 40 and 90.
- C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer.
Input is guaranteed to be within the range from 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V= 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
"""
class SolutionSubtractRules(object):
def _get_roman_to_value(self, i, roman):
        # If the symbol is I, X or C, inspect the next symbol to decide
        # whether to subtract or add.
if ((roman == 'I' and self.s[i + 1] in ['V', 'X'])
or (roman == 'X' and self.s[i + 1] in ['L', 'C'])
or (roman == 'C' and self.s[i + 1] in ['D', 'M'])):
return -self.roman_int_d[roman]
else:
return self.roman_int_d[roman]
def romanToInt(self, s):
"""
:type s: str
:rtype: int
Time complexity: O(n), where n is the length of s.
        Space complexity: O(1), bounded by the size of the roman-to-integer dict.
"""
self.s = s
n = len(self.s)
# Create a dict:roman->int.
self.roman_int_d = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
# For each roman, get its value by rules and add it to result.
result = 0
for i, roman in enumerate(s):
if i < n - 1:
result += self._get_roman_to_value(i, roman)
else:
result += self.roman_int_d[roman]
return result
class SolutionLeftBigger(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
Time complexity: O(n), where n is the length of s.
        Space complexity: O(1), bounded by the size of the roman-to-integer dict.
"""
n = len(s)
# Create a dict:roman->int.
roman_int_d = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
        # If the right neighbor's value is bigger, subtract; otherwise add to the result.
result = 0
for i in range(n - 1):
if roman_int_d[s[i]] < roman_int_d[s[i + 1]]:
result -= roman_int_d[s[i]]
else:
result += roman_int_d[s[i]]
result += roman_int_d[s[n - 1]]
return result
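# Worked trace (illustrative) for 'MCMXCIV', matching the comparison rule above:
# M(1000) -> +1000, C(100) < M -> -100, M -> +1000, X(10) < C -> -10,
# C -> +100, I(1) < V -> -1, final V -> +5; total = 1994.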
class SolutionReplace(object):
def romanToInt(self, s):
"""
:type s: str
:rtype: int
        Time complexity: O(n), where n is the length of s (each of the six
        replace passes is a linear scan).
Space complexity: O(n).
"""
# Replace chars by rules:
# - I can be placed before V (5) and X (10) to make 4 and 9.
# - X can be placed before L (50) and C (100) to make 40 and 90.
# - C can be placed before D (500) and M (1000) to make 400 and 900.
s = (s.replace('IV', 'IIII').replace('IX', 'IIIIIIIII')
.replace('XL', 'XXXX').replace('XC', 'XXXXXXXXX')
.replace('CD', 'CCCC').replace('CM', 'CCCCCCCCC'))
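        # Illustrative check: 'XIV' becomes 'XIIII' here, so the single
        # additive pass below yields 10 + 1 + 1 + 1 + 1 = 14.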
# Create a dict:roman->int.
roman_int_d = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
n = len(s)
result = 0
for i in range(n):
result += roman_int_d[s[i]]
return result
def main():
import time
print 'By SolutionSubtractRules:'
start_time = time.time()
s = 'III' # Output: 3.
print SolutionSubtractRules().romanToInt(s)
s = 'IV' # Output: 4.
print SolutionSubtractRules().romanToInt(s)
s = 'IX' # Output: 9.
print SolutionSubtractRules().romanToInt(s)
s = 'LVIII' # Output: 58.
print SolutionSubtractRules().romanToInt(s)
s = 'MCMXCIV' # Output: 1994.
print SolutionSubtractRules().romanToInt(s)
print 'Time: {}'.format(time.time() - start_time)
print 'By SolutionLeftBigger:'
start_time = time.time()
s = 'III' # Output: 3.
print SolutionLeftBigger().romanToInt(s)
s = 'IV' # Output: 4.
print SolutionLeftBigger().romanToInt(s)
s = 'IX' # Output: 9.
print SolutionLeftBigger().romanToInt(s)
s = 'LVIII' # Output: 58.
print SolutionLeftBigger().romanToInt(s)
s = 'MCMXCIV' # Output: 1994.
print SolutionLeftBigger().romanToInt(s)
print 'Time: {}'.format(time.time() - start_time)
print 'By SolutionReplace:'
start_time = time.time()
s = 'III' # Output: 3.
print SolutionReplace().romanToInt(s)
s = 'IV' # Output: 4.
print SolutionReplace().romanToInt(s)
s = 'IX' # Output: 9.
print SolutionReplace().romanToInt(s)
s = 'LVIII' # Output: 58.
print SolutionReplace().romanToInt(s)
s = 'MCMXCIV' # Output: 1994.
print SolutionReplace().romanToInt(s)
print 'Time: {}'.format(time.time() - start_time)
if __name__ == '__main__':
main()
|
pyladieshre/pyladies | refs/heads/master | harare/settings.py | 1 | """
Django settings for harare project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'xnk36o$h1m!)p0y!b(63myjcw_69be&1k@e91(jdftia3^h1h*'
# SECURITY WARNING: don't run with debug turned on in production!
# The following checks to see if running on Heroku and then disables debugging.
# http://stackoverflow.com/questions/9383450/how-can-i-detect-herokus-environment
ON_HEROKU = False
if 'DATABASE_URL' in os.environ:
ON_HEROKU = True
DEBUG = True
if ON_HEROKU:
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third_party_apps
'crispy_forms',
'bootstrap3',
'markitup',
'autoslug',
# my_apps
'pyladies_harare',
'talks',
'profiles',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'harare.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
WSGI_APPLICATION = 'harare.wsgi.application'
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Update database configuration with $DATABASE_URL
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
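# e.g. a Heroku-style DATABASE_URL such as
# postgres://user:pass@host:5432/dbname is parsed by dj_database_url into
# the ENGINE/NAME/USER/PASSWORD/HOST/PORT keys of the default entry above
# (illustrative value, not a real credential).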
# AUTH_USER_MODEL = 'auth.User'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Harare'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
|
Comunitea/OCB | refs/heads/8.0 | addons/event/report/__init__.py | 435 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_event_registration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pdellaert/ansible | refs/heads/devel | lib/ansible/module_utils/network/ios/argspec/lldp_global/lldp_global.py | 19 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be overwritten
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the ios_lldp_global module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class Lldp_globalArgs(object):
def __init__(self, **kwargs):
pass
argument_spec = {'config': {'options': {'holdtime': {'type': 'int'},
'reinit': {'type': 'int'},
'enabled': {'type': 'bool'},
'timer': {'type': 'int'},
'tlv_select': {
'options': {
'four_wire_power_management': {'type': 'bool'},
'mac_phy_cfg': {'type': 'bool'},
'management_address': {'type': 'bool'},
'port_description': {'type': 'bool'},
'port_vlan': {'type': 'bool'},
'power_management': {'type': 'bool'},
'system_capabilities': {'type': 'bool'},
'system_description': {'type': 'bool'},
'system_name': {'type': 'bool'}
},
'type': 'dict'},
},
'type': 'dict'},
'state': {'choices': ['merged', 'replaced', 'deleted'],
'default': 'merged',
'type': 'str'}}
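# A hedged illustration (not emitted by the resource module builder): a task
# consuming this argument spec might look like:
#
#   - name: Merge LLDP global configuration
#     ios_lldp_global:
#       config:
#         holdtime: 10
#         enabled: true
#       state: merged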
|
asnorkin/sentiment_analysis | refs/heads/master | site/lib/python2.7/site-packages/sklearn/utils/_scipy_sparse_lsqr_backport.py | 378 | """Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
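# Illustrative check: _sym_ortho(3.0, 4.0) returns (0.6, 0.8, 5.0),
# so c*a + s*b == r and -s*a + c*b == 0, as a Givens rotation should.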
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
iter_lim=None, show=False, calc_var=False):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
``min ||Ax - b||^2 + d^2 ||x||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations -- solve A*x = b
2. Linear least squares -- solve A*x = b
in the least-squares sense
3. Damped least squares -- solve ( A )*x = ( b )
( damp*I ) ( 0 )
in the least-squares sense
Parameters
----------
    A : {sparse matrix, ndarray, LinearOperator}
Representation of an m-by-n matrix. It is required that
the linear operator can produce ``Ax`` and ``A^T x``.
b : (m,) ndarray
Right-hand side vector ``b``.
damp : float
Damping coefficient.
atol, btol : float, default 1.0e-8
Stopping tolerances. If both are 1.0e-9 (say), the final
residual norm should be accurate to about 9 digits. (The
final x will usually have fewer correct digits, depending on
cond(A) and the size of damp.)
conlim : float
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive.
iter_lim : int
Explicit limitation on number of iterations (for safety).
show : bool
Display an iteration log.
calc_var : bool
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'*r - damp^2*x)``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A*x0``.
2. Use LSQR to solve the system ``A*dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A*x = b and k2 iterations to solve A*dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A*x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M*x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A*M(inverse)*z =
b``, after which x can be recovered by solving M*x = z.
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
"""
A = aslinearoperator(A)
if len(b.shape) > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
nstop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
"""
Set up the first vectors u and v for the bidiagonalization.
These satisfy beta*u = b, alfa*v = A'u.
"""
__xm = np.zeros(m) # a matrix for temporary holding
__xn = np.zeros(n) # a matrix for temporary holding
v = np.zeros(n)
u = b
x = np.zeros(n)
alfa = 0
beta = np.linalg.norm(u)
w = np.zeros(n)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
bnorm = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = ' Itn x[0] r1norm r2norm '
head2 = ' Compatible LS Norm A Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
"""
% Perform the next step of the bidiagonalization to obtain the
% next beta, u, alfa, v. These satisfy the relations
% beta*u = a*v - alfa*u,
% alfa*v = A'*u - beta*v.
"""
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
rhobar1 = sqrt(rhobar**2 + damp**2)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
# Although there is cancellation, it might be accurate enough.
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
if show:
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (anorm, acond)
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
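# A minimal usage sketch (not part of the original backport; the test
# matrix below is illustrative):
#
#   import numpy as np
#   from scipy.sparse import rand as sprand
#   A = sprand(100, 10, density=0.5, random_state=0)
#   b = np.ones(100)
#   x, istop, itn, r1norm = lsqr(A, b, damp=0.0, atol=1e-10, btol=1e-10)[:4]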
|
ninuxorg/nodeshot | refs/heads/master | nodeshot/conf/urls.py | 5 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls))
)
if 'smuggler' in settings.INSTALLED_APPS:
# smuggler must be before admin
urlpatterns = patterns('',
url(r'^admin/', include('smuggler.urls'))
) + urlpatterns
if 'filebrowser' in settings.INSTALLED_APPS:
from filebrowser.sites import site
urlpatterns += patterns('',
url(r'^admin/filebrowser/', include(site.urls)),
)
if 'rosetta' in settings.INSTALLED_APPS:
# rosetta must be before admin
urlpatterns = patterns('',
url(r'^admin/translations/', include('rosetta.urls')),
) + urlpatterns
if 'nodeshot.interop.sync' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'', include('nodeshot.interop.sync.urls')),
)
if settings.SERVE_STATIC:
from django.conf.urls.static import static
    urlpatterns += patterns('',
        url(r'^static/(?P<path>.*)$',
            'django.contrib.staticfiles.views.serve'),
    )
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if 'social.apps.django_app.default' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'', include('social.apps.django_app.urls', namespace='social')),
)
if 'grappelli' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^grappelli/', include('grappelli.urls')),
)
if 'nodeshot.core.websockets' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^websockets/', include('nodeshot.core.websockets.urls')),
)
if 'nodeshot.community.profiles' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^account/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$',
'nodeshot.community.profiles.html_views.password_reset_from_key',
name='account_password_reset_from_key'),
)
from nodeshot.community.profiles.settings import EMAIL_CONFIRMATION
if EMAIL_CONFIRMATION:
urlpatterns += patterns('',
url(r'^confirm_email/(\w+)/$',
'nodeshot.community.profiles.html_views.confirm_email',
name='emailconfirmation_confirm_email'),
)
if 'nodeshot.core.api' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'', include('nodeshot.core.api.urls')),
)
if 'nodeshot.ui.open311_demo' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
        url(r'^open311/', include('nodeshot.ui.open311_demo.urls', namespace='open311_demo', app_name='open311_demo')),
)
if 'nodeshot.ui.default' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'', include('nodeshot.ui.default.urls', namespace='ui', app_name='ui')),
)
urlpatterns += patterns('',
url(r'^jsi18n/$', 'nodeshot.core.base.views.jsi18n', {'packages': ('nodeshot.ui.default',)}, name='jsi18n')
)
|
aidora/linux | refs/heads/master | Documentation/target/tcm_mod_builder.py | 4981 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
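# A hedged usage sketch (the -m/-p flags are assumed from the optparse
# handling elsewhere in this script):
#
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# which would emit the skeleton *_base.h, *_fabric.{c,h} and configfs files
# built by the functions below.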
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	if os.path.isdir(fabric_mod_dir_var):
		return 1
	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
	# os.mkdir() returns None and raises OSError on failure, so the old
	# "if ret:" check could never fire; catch the exception instead.
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
	# file.write() returns None, so test for failure via the exception.
	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	try:
		p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip ahead until the start of struct target_core_fabric_ops;
		# the original loop began collecting before the struct was reached.
		if process_fo == 0 and not re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		if process_fo == 0:
			process_fo = 1
			line = p.readline()
			continue
		# Collect each function-pointer member,
		# e.g. "char *(*get_fabric_name)(void);"
		if re.search('\(\*', line):
			fabric_ops.append(line.rstrip())
		line = p.readline()
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
    try:
        pi = open(fi, 'w')
    except IOError:
        tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
    try:
        p.write(buf)
    except IOError:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    try:
        pi.write(bufi)
    except IOError:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
    try:
        p = open(f, 'w')
    except IOError:
        tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    try:
        p.write(buf)
    except IOError:
        tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
    try:
        p = open(f, 'w')
    except IOError:
        tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    try:
        p.write(buf)
    except IOError:
        tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
    tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
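# Example invocation (a sketch; the module name and protocol are placeholders):
#
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ with configfs, fabric, Makefile
# and Kconfig skeletons for the iSCSI protocol ident, then offers to wire the
# new module into drivers/target/Makefile and Kconfig.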
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
|
Big-B702/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/curses/__init__.py | 45 | """curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
   curses.initscr()
...
"""
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
import _curses, curses
# we call setupterm() here because it raises an error
# instead of calling exit() in error cases.
setupterm(term=_os.environ.get("TERM", "unknown"),
fd=_sys.__stdout__.fileno())
stdscr = _curses.initscr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
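# Example (sketch): once initscr() has run, the ACS_* constants copied above
# are available on the curses package itself:
#
#   stdscr = initscr()
#   stdscr.addch(0, 0, curses.ACS_ULCORNER)
#   stdscr.refresh()
#   endwin()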
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
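# Example (sketch): after start_color(), the mirrored constants can be used
# to size colour handling to the terminal:
#
#   start_color()
#   if curses.COLOR_PAIRS > 1:
#       init_pair(1, COLOR_RED, COLOR_BLACK)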
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
    from .has_key import has_key  # explicit relative import (Python 3)
|
Jorge-Rodriguez/ansible-modules-core | refs/heads/devel | packaging/os/apt.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Flowroute LLC
# Written by Matthew Williams <matthew@flowroute.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed.
required: false
default: present
choices: [ "latest", "absent", "present", "build-dep" ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
required: false
default: no
choices: [ "yes", "no" ]
cache_valid_time:
description:
      - If C(update_cache) is specified and the last run was less than or equal to I(cache_valid_time) seconds ago, the C(update_cache) step is skipped.
required: false
default: no
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
required: false
default: no
choices: [ "yes", "no" ]
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
required: false
default: null
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
required: false
default: null
choices: [ "yes", "no" ]
force:
description:
- If C(yes), force installs/removes.
required: false
default: "no"
choices: [ "yes", "no" ]
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.1"
upgrade:
description:
- 'If yes or safe, performs an aptitude safe-upgrade.'
- 'If full, performs an aptitude full-upgrade.'
- 'If dist, performs an apt-get dist-upgrade.'
- 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1"
required: false
default: "no"
choices: [ "no", "yes", "safe", "full", "dist"]
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
- Options should be supplied as comma separated list
required: false
default: 'force-confdef,force-confold'
deb:
description:
- Path to a .deb package on the remote machine.
      - If C(://) is in the path, ansible will attempt to download the deb before installing. (Version added 2.1)
required: false
version_added: "1.6"
autoremove:
description:
- If C(yes), remove unused dependency packages for all module states except I(build-dep).
required: false
default: no
choices: [ "yes", "no" ]
aliases: [ 'autoclean']
version_added: "2.1"
only_upgrade:
description:
- Only install/upgrade a package if it is already installed.
required: false
default: false
version_added: "2.1"
requirements: [ python-apt, aptitude ]
author: "Matthew Williams (@mgwilliams)"
notes:
- Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise
C(apt-get) suffices.
'''
EXAMPLES = '''
# Update repositories cache and install "foo" package
- apt: name=foo update_cache=yes
# Remove "foo" package
- apt: name=foo state=absent
# Install the package "foo"
- apt: name=foo state=present
# Install the version '1.00' of package "foo"
- apt: name=foo=1.00 state=present
# Update the repository cache and update package "nginx" to latest version using default release squeeze-backports
- apt: name=nginx state=latest default_release=squeeze-backports update_cache=yes
# Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
- apt: name=openjdk-6-jdk state=latest install_recommends=no
# Update all packages to the latest version
- apt: upgrade=dist
# Run the equivalent of "apt-get update" as a separate step
- apt: update_cache=yes
# Only run "update_cache=yes" if the last one is more than 3600 seconds ago
- apt: update_cache=yes cache_valid_time=3600
# Pass options to dpkg on run
- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef'
# Install a .deb package
- apt: deb=/tmp/mypackage.deb
# Install the build dependencies for package "foo"
- apt: pkg=foo state=build-dep
# Install a .deb package from the internet.
- apt: deb=https://example.com/python-ppq_0.1-1_all.deb
'''
RETURN = '''
cache_updated:
description: if the cache was updated or not
returned: success, in some cases
type: boolean
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: datetime
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: string
sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
stderr:
description: error output from apt
returned: success, when needed
type: string
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
'''
import traceback
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import os
import datetime
import fnmatch
import itertools
# re and time are used below (parse_diff, cache timestamps); import them
# explicitly instead of relying on the wildcard module_utils imports.
import re
import time
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND = 'noninteractive',
DEBIAN_PRIORITY = 'critical',
# We screenscrape apt-get and aptitude output for information so we need
# to make sure we use the C locale when running commands
LANG = 'C',
LC_ALL = 'C',
LC_MESSAGES = 'C',
LC_CTYPE = 'C',
)
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
HAS_PYTHON_APT = True
try:
import apt
import apt.debfile
import apt_pkg
except ImportError:
HAS_PYTHON_APT = False
def package_split(pkgspec):
parts = pkgspec.split('=', 1)
if len(parts) > 1:
return parts[0], parts[1]
else:
return parts[0], None
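# e.g. package_split('foo=1.0') -> ('foo', '1.0'); package_split('foo') -> ('foo', None)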
def package_versions(pkgname, pkg, pkg_cache):
try:
versions = set(p.version for p in pkg.versions)
except AttributeError:
# assume older version of python-apt is installed
# apt.package.Package#versions require python-apt >= 0.7.9.
pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
pkg_versions = (p.VersionList for p in pkg_cache_list)
versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
return versions
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
def package_status(m, pkgname, version, cache, state):
try:
# get the package from the cache, as well as the
# the low-level apt_pkg.Package object which contains
# state fields not directly acccesible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
upgradable = False
# when virtual package providing only one package, look up status of target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
if installed:
is_installed = True
return is_installed, upgradable, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as upgradable and let apt-get install deal with it
return False, True, False
else:
return False, False, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
if version:
versions = package_versions(pkgname, pkg, cache._cache)
avail_upgrades = fnmatch.filter(versions, version)
if package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
# Only claim the package is installed if the version is matched as well
package_is_installed = fnmatch.fnmatch(installed_version, version)
# Only claim the package is upgradable if a candidate matches the version
package_is_upgradable = False
for candidate in avail_upgrades:
if package_version_compare(candidate, installed_version) > 0:
package_is_upgradable = True
break
else:
package_is_upgradable = bool(avail_upgrades)
else:
try:
package_is_upgradable = pkg.is_upgradable
except AttributeError:
# assume older version of python-apt is installed
package_is_upgradable = pkg.isUpgradable
return package_is_installed, package_is_upgradable, has_files
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
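# e.g. expand_dpkg_options('force-confdef,force-confold') ->
#   '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'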
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
# matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
# (Should also deal with the ':' for multiarch like the fnmatch code below)
#
# We have decided not to do similar implicit regex matching but might take
# a PR to add some sort of explicit regex matching:
# https://github.com/ansible/ansible-modules-core/issues/1258
new_pkgspec = []
for pkgspec_pattern in pkgspec:
pkgname_pattern, version = package_split(pkgspec_pattern)
# note that none of these chars is allowed in a (debian) pkgname
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if not ":" in pkgname_pattern:
try:
pkg_name_cache = _non_multiarch
except NameError:
pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name]
else:
try:
pkg_name_cache = _all_pkg_names
except NameError:
pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache]
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if len(matches) == 0:
m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern))
else:
new_pkgspec.extend(matches)
else:
# No wildcards in name
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
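# Illustration (results depend on the contents of the local apt cache):
# expand_pkgspec_from_fnmatches(m, ['apt*'], cache) might return
# ['apt', 'apt-utils', ...] (native packages only), while ['apt*:i386']
# would match the multiarch names as well.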
def parse_diff(output):
diff = output.splitlines()
try:
# check for start marker from aptitude
diff_start = diff.index('Resolving dependencies...')
except ValueError:
try:
# check for start marker from apt-get
diff_start = diff.index('Reading state information...')
except ValueError:
# show everything
diff_start = -1
try:
# check for end marker line from both apt-get and aptitude
diff_end = (i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item)).next()
except StopIteration:
diff_end = len(diff)
diff_start += 1
diff_end += 1
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, autoremove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
if not installed or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if len(packages) != 0:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if build_dep:
cmd = "%s -y %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, force_yes, autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err))
else:
return (True, dict(changed=True, stdout=out, stderr=err, diff=diff))
else:
return (True, dict(changed=False))
def get_field_of_deb(m, deb_file, field="Version"):
cmd_dpkg = m.get_bin_path("dpkg", True)
cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
rc, stdout, stderr = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
return stdout.strip('\n')
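# e.g. get_field_of_deb(m, '/tmp/foo_1.0_all.deb', 'Package') -> 'foo'
# (shells out to `dpkg --field`, so dpkg must be on the PATH)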
def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options):
changed=False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
try:
installed_pkg = apt.Cache()[pkg_name]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception:
e = get_exception()
m.fail_json(msg="Unable to install package: %s" % str(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if len(deps_to_install) > 0:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if len(pkgs_to_install) > 0:
options = ' '.join(["--%s"% x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout',''), stderr=retvals.get('stderr',''), diff=retvals.get('diff', ''))
def remove(m, pkgspec, cache, purge=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
pkg_list = []
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='remove')
if installed or (has_files and purge):
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if len(packages) == 0:
m.exit_json(changed=False)
else:
if purge:
purge = '--purge'
else:
purge = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -q -y %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, autoremove, check_arg, packages)
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err)
m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
def upgrade(m, mode="yes", force=False, default_release=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist":
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade"
elif mode == "full":
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options,
force_yes, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
def download(module, deb):
tempdir = os.path.dirname(__file__)
package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1]))
# When downloading a deb, how much of the deb to download before
# saving to a tempfile (64k)
BUFSIZE = 65536
try:
rsp, info = fetch_url(module, deb)
        f = open(package, 'wb')  # binary mode: a .deb is a binary archive
        # Read BUFSIZE (64k) bytes at a time to limit memory use
while True:
data = rsp.read(BUFSIZE)
if data == "":
break # End of file, break while loop
f.write(data)
f.close()
deb = package
except Exception:
e = get_exception()
module.fail_json(msg="Failure downloading %s, %s" % (deb, e))
return deb
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
cache_valid_time = dict(type='int'),
purge = dict(default=False, type='bool'),
package = dict(default=None, aliases=['pkg', 'name'], type='list'),
deb = dict(default=None, type='path'),
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS),
autoremove = dict(type='bool', default=False, aliases=['autoclean']),
only_upgrade = dict(type='bool', default=False),
allow_unauthenticated = dict(default='no', aliases=['allow-unauthenticated'], type='bool'),
),
mutually_exclusive = [['package', 'upgrade', 'deb']],
required_one_of = [['package', 'upgrade', 'update_cache', 'deb']],
supports_check_mode = True
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="python-apt must be installed to use check mode. If run normally this module can autoinstall it")
try:
module.run_command('apt-get update', check_rc=True)
module.run_command('apt-get install python-apt -y -q', check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. Please install python-apt package.")
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]:
module.fail_json(msg="Could not find aptitude. Please ensure it is installed.")
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
# Deal with deprecated aliases
if p['state'] == 'installed':
p['state'] = 'present'
if p['state'] == 'removed':
p['state'] = 'absent'
try:
cache = apt.Cache()
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
if p['update_cache']:
# Default is: always update the cache
cache_valid = False
now = datetime.datetime.now()
if p.get('cache_valid_time', False):
try:
mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
except:
# Looks like the update-success-stamp is not available
# Fallback: Checking the mtime of the lists
try:
mtime = os.stat(APT_LISTS_PATH).st_mtime
except:
# No mtime could be read. We update the cache to be safe
mtime = False
if mtime:
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
mtimestamp = datetime.datetime.fromtimestamp(mtime)
if mtimestamp + tdelta >= now:
cache_valid = True
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
if cache_valid is not True:
for retry in xrange(3):
try:
cache.update()
break
except apt.cache.FetchFailedException:
pass
else:
#out of retries, pass on the exception
raise
cache.open(progress=None)
updated_cache = True
updated_cache_time = int(time.mktime(now.timetuple()))
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time)
else:
updated_cache = False
updated_cache_time = 0
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], dpkg_options)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = download(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, dpkg_options=p['dpkg_options'])
packages = p['package']
latest = p['state'] == 'latest'
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if p['state'] in ('latest', 'present', 'build-dep'):
state_upgrade = False
state_builddep = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
result = install(module, packages, cache, upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes, dpkg_options=dpkg_options,
build_dep=state_builddep, autoremove=autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated)
(success, retvals) = result
retvals['cache_updated']=updated_cache
retvals['cache_update_time']=updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], dpkg_options, autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == "__main__":
main()
|
samabhi/pstHealth | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/package_index.py | 301 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py',-16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py',-20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base,py_ver,plat
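# e.g. parse_bdist_wininst('foo-1.0.win32.exe') -> ('foo-1.0', None, 'win32')
#      parse_bdist_wininst('foo-1.0.win32-py2.7.exe') -> ('foo-1.0', '2.7', 'win32')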
def egg_info_for_url(url):
scheme, server, path, parameters, query, fragment = urlparse(url)
base = unquote(path.split('/')[-1])
if server=='sourceforge.net' and base=='download': # XXX Yuck
base = unquote(path.split('/')[-2])
if '#' in base: base, fragment = base.split('#',1)
return base,fragment
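# e.g. egg_info_for_url('https://example.com/pkg-1.0.tar.gz#egg=pkg-1.0')
#   -> ('pkg-1.0.tar.gz', 'egg=pkg-1.0')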
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata): yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence = CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
# So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1,len(parts)+1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence = precedence,
platform = platform
)
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urljoin(url, htmldecode(match.group(1)))
user_agent = "Python-urllib/%s setuptools/%s" % (
sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
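# Example (sketch): a download URL carrying a hash fragment, such as
#   https://example.com/foo.tgz#md5=d41d8cd98f00b204e9800998ecf8427e
# yields a HashChecker that validates the fetched bytes against that digest;
# URLs without a recognised fragment fall back to the null ContentChecker.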
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.python.org/simple", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self,*args,**kw)
self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate,hosts))).match
self.to_scan = []
if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
self.opener = ssl_support.opener_for(ca_bundle)
else: self.opener = urllib2.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
return True
msg = ("\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/1dg9ijs for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
for item in search_path:
if os.path.isdir(item):
for entry in os.listdir(item):
if entry.endswith('.egg-link'):
self.scan_egg_link(item, entry)
def scan_egg_link(self, path, entry):
lines = [_f for _f in map(str.strip,
open(os.path.join(path, entry))) if _f]
if len(lines)==2:
for dist in find_distributions(os.path.join(path, lines[0])):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
unquote, link[len(self.index_url):].split('/')
))
if len(parts)==2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url+='#egg=%s-%s' % (pkg,ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg,*args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name+'/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key,())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?" % (
checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found,fragment,tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=self.download(dist.location, tmpdir))
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists)==1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename=dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment,dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, info = None, None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(strip_fragment(url))
if isinstance(fp, HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename,'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp: fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, httplib.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib2.HTTPError as v:
return v
except urllib2.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except httplib.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except httplib.HTTPException as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme=='git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme=='file':
return url2pathname(urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/',1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':',1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username="+auth
netloc = host
url = urlunparse((scheme, netloc, path, p, q, f))
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Strip the fragment; some fragment identification fails otherwise
path = path.split('#',1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urlunsplit((scheme, netloc, path, query, ''))
return url, rev
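# Example (hypothetical URL): 'git+https://example.com/repo@v1.0' splits
# into ('https://example.com/repo', 'v1.0'); rev is None when no '@' is
# present. Note the pop_prefix flag is accepted but unused in this body.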
def _download_git(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("(cd %s && git checkout --quiet %s)" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#',1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("(cd %s && hg up -C -r %s >&-)" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
if not isinstance(c, int):
return c
if c>255: return unichr(c)
return chr(c)
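# e.g. uchr(65) -> 'A'; values above 255 go through unichr() (Python 2),
# and non-int input (an already-decoded string) is returned unchanged.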
def decode_entity(match):
what = match.group(1)
if what.startswith('#x'):
what = int(what[2:], 16)
elif what.startswith('#'):
what = int(what[1:])
else:
what = name2codepoint.get(what, match.group(0))
return uchr(what)
def htmldecode(text):
"""Decode HTML entities in the given text."""
return entity_sub(decode_entity, text)
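# e.g. htmldecode('&lt;spam&amp;eggs&gt;') -> '<spam&eggs>'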
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
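# socket_timeout() is a decorator factory: it installs a default socket
# timeout around the wrapped call and restores the old value afterwards.
# Usage sketch (hypothetical function name):
#   fetch = socket_timeout(30)(fetch)
# open_with_auth below is wrapped this way with _SOCKET_TIMEOUT.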
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
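# Behaves like a 2-tuple, so callers can write (hypothetical values):
#   username, password = Credential('user', 'secret')
# and str(cred) yields the 'user:secret' form consumed by _encode_auth().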
class PyPIConfig(ConfigParser.ConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
ConfigParser.ConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
def open_with_auth(url, opener=urllib2.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urlparse(url)
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise httplib.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)' % info)
if auth:
auth = "Basic " + _encode_auth(auth)
new_url = urlunparse((scheme,host,path,params,query,frag))
request = urllib2.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib2.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
if s2==scheme and h2==host:
fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse(url)
filename = url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
with open(os.path.join(filename,f),'r') as fp:
body = fp.read()
break
elif os.path.isdir(os.path.join(filename,f)):
f+='/'
files.append("<a href=%r>%s</a>" % (f,f))
else:
body = ("<html><head><title>%s</title>" % url) + \
"</head><body>%s</body></html>" % '\n'.join(files)
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
return HTTPError(url, status, message, headers, StringIO(body))
|
camptocamp/QGIS | refs/heads/master | python/plugins/processing/gui/BatchOutputSelectionPanel.py | 1 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchOutputSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4 import QtGui, QtCore
from processing.gui.AutofillDialog import AutofillDialog
from processing.parameters.ParameterMultipleInput import ParameterMultipleInput
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterSelection import ParameterSelection
from processing.parameters.ParameterFixedTable import ParameterFixedTable
import os.path
class BatchOutputSelectionPanel(QtGui.QWidget):
def __init__(self, output, alg, row, col, batchDialog):
super(BatchOutputSelectionPanel, self).__init__(None)
self.alg = alg
self.row = row
self.col = col
self.output = output
self.batchDialog = batchDialog
self.table = batchDialog.table
self.horizontalLayout = QtGui.QHBoxLayout(self)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setMargin(0)
self.text = QtGui.QLineEdit()
self.text.setText("")
self.text.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.horizontalLayout.addWidget(self.text)
self.pushButton = QtGui.QPushButton()
self.pushButton.setText("...")
self.pushButton.clicked.connect(self.showSelectionDialog)
self.horizontalLayout.addWidget(self.pushButton)
self.setLayout(self.horizontalLayout)
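# The panel is a line edit plus a '...' button; the button opens a save-file
# dialog and, via AutofillDialog, can propagate the chosen name (verbatim,
# numbered, or parameter-derived) down the remaining rows of the column.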
def showSelectionDialog(self):
filefilter = self.output.getFileFilter(self.alg)
filename = QtGui.QFileDialog.getSaveFileName(self, "Save file", "", filefilter)
if filename:
filename = unicode(filename)
dlg = AutofillDialog(self.alg)
dlg.exec_()
if dlg.mode is not None:
try:
if dlg.mode == AutofillDialog.DO_NOT_AUTOFILL:
#self.text.setValue(filename)
self.table.cellWidget(self.row, self.col).setValue(filename)
elif dlg.mode == AutofillDialog.FILL_WITH_NUMBERS:
n = self.table.rowCount() - self.row
for i in range(n):
name = filename[:filename.rfind(".")] + str(i+1) + filename[filename.rfind("."):]
self.table.cellWidget(i + self.row, self.col).setValue(name)
elif dlg.mode == AutofillDialog.FILL_WITH_PARAMETER:
n = self.table.rowCount() - self.row
for i in range(n):
widget = self.table.cellWidget(i+self.row, dlg.param)
param = self.alg.parameters[dlg.param]
if isinstance(param, (ParameterRaster, ParameterVector, ParameterTable, ParameterMultipleInput)):
s = unicode(widget.getText())
s = os.path.basename(s)
s = s[:s.rfind(".")]
elif isinstance(param, ParameterBoolean):
s = str(widget.currentIndex() == 0)
elif isinstance(param, ParameterSelection):
s = unicode(widget.currentText())
elif isinstance(param, ParameterFixedTable):
s = unicode(widget.table)
else:
s = unicode(widget.text())
name = filename[:filename.rfind(".")] + s + filename[filename.rfind("."):]
self.table.cellWidget(i + self.row, self.col).setValue(name)
except:
pass
def setValue(self, text):
return self.text.setText(text)
def getValue(self):
return unicode(self.text.text())
|
spinellic/Mission-Planner | refs/heads/master | Lib/site-packages/scipy/stats/tests/test_fit.py | 53 | # NOTE: contains only one test, _est_cont_fit, that is renamed so that
# nose doesn't run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters: relatively small sample size, default starting values
# Ran 84 tests in 401.797s
# FAILED (failures=15)
import numpy.testing as npt
import numpy as np
from scipy import stats
from test_continuous_basic import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
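# An estimate fails only when |estimate - true| exceeds the elementwise
# maximum of (thresh_percent * true) and thresh_min, i.e. it must miss by
# both the relative and the absolute margin (see diffthreshold below).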
#distcont = [['genextreme', (3.3184017469423535,)]]
def _est_cont_fit():
# this tests the closeness of the estimated parameters to the true
# parameters with fit method of continuous distributions
# Note: is slow, some distributions don't converge with sample size <= 10000
for distname, arg in distcont:
yield check_cont_fit, distname,arg
def check_cont_fit(distname,arg):
distfn = getattr(stats, distname)
rvs = distfn.rvs(size=n_repl1,*arg)
est = distfn.fit(rvs) #,*arg) # start with default values
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
txt = ''
diffthreshold = np.max(np.vstack([truearg*thresh_percent,
np.ones(distfn.numargs+2)*thresh_min]),0)
# threshold for location
diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
if np.any(np.isnan(est)):
raise AssertionError('nan returned in fit')
else:
if np.any((np.abs(diff) - diffthreshold) > 0.0):
## txt = 'WARNING - diff too large with small sample'
## print 'parameter diff =', diff - diffthreshold, txt
rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)])
est = distfn.fit(rvs) #,*arg)
truearg = np.hstack([arg,[0.0,1.0]])
diff = est-truearg
if np.any((np.abs(diff) - diffthreshold) > 0.0):
txt = 'parameter: %s\n' % str(truearg)
txt += 'estimated: %s\n' % str(est)
txt += 'diff : %s\n' % str(diff)
raise AssertionError('fit not very good in %s\n' % distfn.name + txt)
if __name__ == "__main__":
import nose
#nose.run(argv=['', __file__])
nose.runmodule(argv=[__file__,'-s'], exit=False)
|
boyuegame/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/test/test_cmd.py | 74 | """
Test script for the 'cmd' module
Original by Michael Schneider
"""
import cmd
import sys
import re
import unittest
import io
from test import support
class samplecmdclass(cmd.Cmd):
"""
Instance the sampleclass:
>>> mycmd = samplecmdclass()
Test for the function parseline():
>>> mycmd.parseline("")
(None, None, '')
>>> mycmd.parseline("?")
('help', '', 'help ')
>>> mycmd.parseline("?help")
('help', 'help', 'help help')
>>> mycmd.parseline("!")
('shell', '', 'shell ')
>>> mycmd.parseline("!command")
('shell', 'command', 'shell command')
>>> mycmd.parseline("func")
('func', '', 'func')
>>> mycmd.parseline("func arg1")
('func', 'arg1', 'func arg1')
Test for the function onecmd():
>>> mycmd.onecmd("")
>>> mycmd.onecmd("add 4 5")
9
>>> mycmd.onecmd("")
9
>>> mycmd.onecmd("test")
*** Unknown syntax: test
Test for the function emptyline():
>>> mycmd.emptyline()
*** Unknown syntax: test
Test for the function default():
>>> mycmd.default("default")
*** Unknown syntax: default
Test for the function completedefault():
>>> mycmd.completedefault()
This is the completedefault method
>>> mycmd.completenames("a")
['add']
Test for the function completenames():
>>> mycmd.completenames("12")
[]
>>> mycmd.completenames("help")
['help']
Test for the function complete_help():
>>> mycmd.complete_help("a")
['add']
>>> mycmd.complete_help("he")
['help']
>>> mycmd.complete_help("12")
[]
>>> sorted(mycmd.complete_help(""))
['add', 'exit', 'help', 'shell']
Test for the function do_help():
>>> mycmd.do_help("testet")
*** No help on testet
>>> mycmd.do_help("add")
help text for add
>>> mycmd.onecmd("help add")
help text for add
>>> mycmd.do_help("")
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add help
<BLANKLINE>
Undocumented commands:
======================
exit shell
<BLANKLINE>
Test for the function print_topics():
>>> mycmd.print_topics("header", ["command1", "command2"], 2, 10)
header
======
command1
command2
<BLANKLINE>
Test for the function columnize():
>>> mycmd.columnize([str(i) for i in range(20)])
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
>>> mycmd.columnize([str(i) for i in range(20)], 10)
0 7 14
1 8 15
2 9 16
3 10 17
4 11 18
5 12 19
6 13
This is an interactive test: put some commands in the cmdqueue attribute
and let the loop execute them.
This test includes the preloop(), postloop(), default(), emptyline(),
parseline(), do_help() functions
>>> mycmd.use_rawinput=0
>>> mycmd.cmdqueue=["", "add", "add 4 5", "help", "help add","exit"]
>>> mycmd.cmdloop()
Hello from preloop
help text for add
*** invalid number of arguments
9
<BLANKLINE>
Documented commands (type help <topic>):
========================================
add help
<BLANKLINE>
Undocumented commands:
======================
exit shell
<BLANKLINE>
help text for add
Hello from postloop
"""
def preloop(self):
print("Hello from preloop")
def postloop(self):
print("Hello from postloop")
def completedefault(self, *ignored):
print("This is the completedefault methode")
def complete_command(self):
print("complete command")
def do_shell(self, s):
pass
def do_add(self, s):
l = s.split()
if len(l) != 2:
print("*** invalid number of arguments")
return
try:
l = [int(i) for i in l]
except ValueError:
print("*** arguments should be numbers")
return
print(l[0]+l[1])
def help_add(self):
print("help text for add")
return
def do_exit(self, arg):
return True
class TestAlternateInput(unittest.TestCase):
class simplecmd(cmd.Cmd):
def do_print(self, args):
print(args, file=self.stdout)
def do_EOF(self, args):
return True
class simplecmd2(simplecmd):
def do_EOF(self, args):
print('*** Unknown syntax: EOF', file=self.stdout)
return True
def test_file_with_missing_final_nl(self):
input = io.StringIO("print test\nprint test2")
output = io.StringIO()
cmd = self.simplecmd(stdin=input, stdout=output)
cmd.use_rawinput = False
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) test\n"
"(Cmd) test2\n"
"(Cmd) "))
def test_input_reset_at_EOF(self):
input = io.StringIO("print test\nprint test2")
output = io.StringIO()
cmd = self.simplecmd2(stdin=input, stdout=output)
cmd.use_rawinput = False
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) test\n"
"(Cmd) test2\n"
"(Cmd) *** Unknown syntax: EOF\n"))
input = io.StringIO("print \n\n")
output = io.StringIO()
cmd.stdin = input
cmd.stdout = output
cmd.cmdloop()
self.assertMultiLineEqual(output.getvalue(),
("(Cmd) \n"
"(Cmd) \n"
"(Cmd) *** Unknown syntax: EOF\n"))
def test_main(verbose=None):
from test import test_cmd
support.run_doctest(test_cmd, verbose)
support.run_unittest(TestAlternateInput)
def test_coverage(coverdir):
trace = support.import_module('trace')
tracer=trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
tracer.run('import importlib; importlib.reload(cmd); test_main()')
r=tracer.results()
print("Writing coverage results...")
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if __name__ == "__main__":
if "-c" in sys.argv:
test_coverage('/tmp/cmd.cover')
elif "-i" in sys.argv:
samplecmdclass().cmdloop()
else:
test_main()
|
jgmize/kuma | refs/heads/master | kuma/wiki/feeds.py | 1 | """Feeds for documents"""
import datetime
import json
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.db.models import F
from django.utils.feedgenerator import (Atom1Feed, Rss201rev2Feed,
SyndicationFeed)
from django.utils.html import escape
from django.utils.translation import ugettext as _
from kuma.core.templatetags.jinja_helpers import add_utm
from kuma.core.urlresolvers import reverse
from kuma.core.validators import valid_jsonp_callback_value
from kuma.users.templatetags.jinja_helpers import gravatar_url
from .models import Document, Revision
from .templatetags.jinja_helpers import (colorize_diff, diff_table,
get_compare_url, tag_diff_table)
MAX_FEED_ITEMS = getattr(settings, 'MAX_FEED_ITEMS', 500)
DEFAULT_FEED_ITEMS = 50
class DocumentsFeed(Feed):
title = _('MDN documents')
subtitle = _('Documents authored by MDN users')
link = _('/')
def __call__(self, request, *args, **kwargs):
self.request = request
if 'all_locales' in request.GET:
self.locale = None
else:
self.locale = request.LANGUAGE_CODE
return super(DocumentsFeed, self).__call__(request, *args, **kwargs)
def feed_extra_kwargs(self, obj):
return {'request': self.request}
def item_extra_kwargs(self, obj):
return {'obj': obj}
def get_object(self, request, format):
if format == 'json':
self.feed_type = DocumentJSONFeedGenerator
elif format == 'rss':
self.feed_type = Rss201rev2Feed
else:
self.feed_type = Atom1Feed
def item_pubdate(self, document):
return document.current_revision.created
def item_title(self, document):
return document.title
def item_description(self, document):
return document.current_revision.summary
def item_author_name(self, document):
return document.current_revision.creator.username
def item_author_link(self, document):
return add_utm(
self.request.build_absolute_uri(
document.current_revision.creator.get_absolute_url()),
'feed', medium='rss')
def item_link(self, document):
return add_utm(
self.request.build_absolute_uri(document.get_absolute_url()),
'feed', medium='rss')
def item_categories(self, document):
return document.tags.all()
class DocumentJSONFeedGenerator(SyndicationFeed):
"""JSON feed generator for Documents
TODO: Someday maybe make this into a JSON Activity Stream?"""
mime_type = 'application/json'
def _encode_complex(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
def write(self, outfile, encoding):
request = self.feed['request']
# Check for a callback param, validate it before use
callback = request.GET.get('callback', None)
if callback is not None:
if not valid_jsonp_callback_value(callback):
callback = None
items_out = []
for item in self.items:
document = item['obj']
# Include some of the simple elements from the preprocessed item
item_out = dict((x, item[x]) for x in (
'link', 'title', 'pubdate', 'author_name', 'author_link',
))
# HACK: DocumentFeed is the superclass of RevisionFeed. In this
# case, current_revision is the revision itself.
# TODO: Refactor this out into separate DocumentFeed and
# RevisionFeed subclasses of Feed.
if hasattr(document, 'current_revision'):
revision = document.current_revision
else:
revision = document
if revision.creator.email:
item_out['author_avatar'] = gravatar_url(revision.creator.email)
summary = getattr(revision, 'summary', None)
if summary:
item_out['summary'] = summary
# Linkify the tags used in the feed item
categories = dict(
(x, request.build_absolute_uri(
reverse('wiki.tag', kwargs={'tag': x})))
for x in item['categories']
)
if categories:
item_out['categories'] = categories
items_out.append(item_out)
data = items_out
if callback:
outfile.write('%s(' % callback)
outfile.write(json.dumps(data, default=self._encode_complex))
if callback:
outfile.write(')')
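# With a valid ?callback=cb query parameter the payload is wrapped as
# cb([...]) for JSONP consumers; otherwise plain JSON is written.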
class DocumentsRecentFeed(DocumentsFeed):
"""
Feed of recently revised documents
"""
title = _('MDN recent document changes')
subtitle = _('Recent changes to MDN documents')
def get_object(self, request, format, tag=None):
super(DocumentsRecentFeed, self).get_object(request, format)
self.tag = tag
if tag:
self.title = _('MDN recent changes to documents tagged %s') % tag
self.link = self.request.build_absolute_uri(
reverse('wiki.tag', args=(tag,)))
else:
self.link = self.request.build_absolute_uri(
reverse('wiki.all_documents'))
def items(self):
# Temporarily storing the selected documents PKs in a list
# to speed up retrieval (max MAX_FEED_ITEMS size)
item_pks = (Document.objects
.filter_for_list(tag_name=self.tag,
locale=self.locale)
.filter(current_revision__isnull=False)
.order_by('-current_revision__created')
.values_list('pk', flat=True)[:MAX_FEED_ITEMS])
return (Document.objects.filter(pk__in=list(item_pks))
.defer('html')
.prefetch_related('current_revision',
'current_revision__creator',
'tags'))
class DocumentsReviewFeed(DocumentsRecentFeed):
"""
Feed of documents in need of review
"""
title = _('MDN documents in need of review')
subtitle = _('Recent changes to MDN documents that need to be reviewed')
def get_object(self, request, format, tag=None):
super(DocumentsReviewFeed, self).get_object(request, format)
self.subtitle = None
if tag:
self.title = _('MDN documents for %s review') % tag
self.link = self.request.build_absolute_uri(
reverse('wiki.list_review_tag', args=(tag,)))
else:
self.title = _('MDN documents for review')
self.link = self.request.build_absolute_uri(
reverse('wiki.list_review'))
return tag
def items(self, tag=None):
# Temporarily storing the selected documents PKs in a list
# to speed up retrieval (max MAX_FEED_ITEMS size)
item_pks = (Document.objects
.filter_for_review(tag_name=tag, locale=self.locale)
.filter(current_revision__isnull=False)
.order_by('-current_revision__created')
.values_list('pk', flat=True)[:MAX_FEED_ITEMS])
return (Document.objects.filter(pk__in=list(item_pks))
.defer('html')
.prefetch_related('current_revision',
'current_revision__creator',
'tags'))
class DocumentsUpdatedTranslationParentFeed(DocumentsFeed):
"""Feed of translated documents whose parent has been modified since the
translation was last updated."""
description_template = 'wiki/feed_docs_updated.html'
def get_object(self, request, format, tag=None):
super(DocumentsUpdatedTranslationParentFeed,
self).get_object(request, format)
self.subtitle = None
self.title = _("MDN '%s' translations in need of update" %
self.locale)
# TODO: Need an HTML / dashboard version of this feed
self.link = self.request.build_absolute_uri(
reverse('wiki.all_documents'))
def items(self):
return (Document.objects
.prefetch_related('parent')
.filter(locale=self.locale, parent__isnull=False)
.filter(modified__lt=F('parent__modified'))
.order_by('-parent__current_revision__created')
[:MAX_FEED_ITEMS])
def get_context_data(self, **kwargs):
context = super(DocumentsUpdatedTranslationParentFeed,
self).get_context_data(**kwargs)
obj = context.get('obj')
trans_based_on_pk = (Revision.objects.filter(document=obj.parent)
.filter(created__lte=obj.modified)
.order_by('created')
.values_list('pk', flat=True)
.first())
mod_url = get_compare_url(obj.parent,
trans_based_on_pk,
obj.parent.current_revision.id)
context['mod_url'] = mod_url
return context
class RevisionsFeed(DocumentsFeed):
"""
Feed of recent revisions
"""
title = _('MDN recent revisions')
subtitle = _('Recent revisions to MDN documents')
def items(self):
items = Revision.objects
limit = int(self.request.GET.get('limit', DEFAULT_FEED_ITEMS))
# Clamp the page size before computing the slice bounds, so oversized
# 'limit' values cannot bypass MAX_FEED_ITEMS.
if not limit or limit > MAX_FEED_ITEMS:
limit = MAX_FEED_ITEMS
page = int(self.request.GET.get('page', 1))
start = (page - 1) * limit
finish = start + limit
if self.locale:
items = items.filter(document__locale=self.locale)
# Temporarily storing the selected revision PKs in a list
# to speed up retrieval (max MAX_FEED_ITEMS size)
item_pks = (items.order_by('-created')
.values_list('pk', flat=True)[start:finish])
return (Revision.objects.filter(pk__in=list(item_pks))
.prefetch_related('creator',
'document'))
def item_title(self, item):
return '%s (%s)' % (item.document.slug, item.document.locale)
def item_description(self, item):
# TODO: put this in a jinja template if django syndication will let us
previous = item.previous
if previous is None:
action = u'Created'
else:
action = u'Edited'
by = u'<h3>%s by:</h3><p>%s</p>' % (action, item.creator.username)
if item.comment:
comment = u'<h3>Comment:</h3><p>%s</p>' % item.comment
else:
comment = u''
review_diff = u''
tag_diff = u''
content_diff = u''
if previous:
prev_review_tags = previous.review_tags.names()
curr_review_tags = item.review_tags.names()
if set(prev_review_tags) != set(curr_review_tags):
table = tag_diff_table(u','.join(prev_review_tags),
u','.join(curr_review_tags),
previous.id, item.id)
review_diff = u'<h3>Review changes:</h3>%s' % table
review_diff = colorize_diff(review_diff)
if previous.tags != item.tags:
table = tag_diff_table(previous.tags, item.tags,
previous.id, item.id)
tag_diff = u'<h3>Tag changes:</h3>%s' % table
tag_diff = colorize_diff(tag_diff)
previous_content = ''
previous_id = u'N/A'
content_diff = u'<h3>Content changes:</h3>'
if previous:
previous_content = previous.get_tidied_content()
current_content = item.get_tidied_content()
previous_id = previous.id
if previous_content != current_content:
content_diff = content_diff + diff_table(
previous_content, current_content,
previous_id, item.id)
content_diff = colorize_diff(content_diff)
else:
content_diff = content_diff + escape(item.content)
link_cell = u'<td><a href="%s">%s</a></td>'
view_cell = link_cell % (add_utm(item.document.get_absolute_url(),
'feed', medium='rss'),
_('View Page'))
edit_cell = link_cell % (add_utm(item.document.get_edit_url(),
'feed', medium='rss'),
_('Edit Page'))
if previous:
compare_cell = link_cell % (
add_utm(
get_compare_url(item.document, previous.id, item.id),
'feed',
medium='rss'
),
_('Show comparison')
)
else:
compare_cell = ''
history_cell = link_cell % (
add_utm(
reverse(
'wiki.document_revisions', args=[item.document.slug]
),
'feed',
medium='rss'
),
_('History')
)
links_table = u'<table border="0" width="80%">'
links_table = links_table + u'<tr>%s%s%s%s</tr>' % (view_cell,
edit_cell,
compare_cell,
history_cell)
links_table = links_table + u'</table>'
return u''.join([by, comment,
tag_diff, review_diff, content_diff, links_table])
def item_link(self, item):
return add_utm(
self.request.build_absolute_uri(item.document.get_absolute_url()),
'feed', medium='rss')
def item_pubdate(self, item):
return item.created
def item_author_name(self, item):
return item.creator.username
def item_author_link(self, item):
return add_utm(
self.request.build_absolute_uri(item.creator.get_absolute_url()),
'feed', medium='rss')
def item_categories(self, item):
return []
|
im-infamou5/volatility | refs/heads/master | volatility/plugins/addrspaces/vboxelf.py | 44 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2005,2006,2007 4tphi Research
#
# Authors:
# {npetroni,awalters}@4tphi.net (Nick Petroni and AAron Walters)
# phil@teuwen.org (Philippe Teuwen)
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
# References:
# VirtualBox core format:
# http://www.virtualbox.org/manual/ch12.html#guestcoreformat
# http://www.virtualbox.org/svn/vbox/trunk/include/VBox/vmm/dbgfcorefmt.h
# http://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR3/DBGFCoreWrite.cpp
import volatility.obj as obj
import volatility.addrspace as addrspace
#pylint: disable-msg=C0111
NT_VBOXCORE = 0xb00
NT_VBOXCPU = 0xb01
DBGFCORE_MAGIC = 0xc01ac0de
DBGFCORE_FMT_VERSION = 0x00010000
class DBGFCOREDESCRIPTOR(obj.CType):
"""A class for VBox core dump descriptors"""
@property
def Major(self):
return (self.u32VBoxVersion >> 24) & 0xFF
@property
def Minor(self):
return (self.u32VBoxVersion >> 16) & 0xFF
@property
def Build(self):
return self.u32VBoxVersion & 0xFFFF
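# e.g. a (hypothetical) u32VBoxVersion of 0x04020008 decodes to
# Major 4, Minor 2, Build 8.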
class VirtualBoxModification(obj.ProfileModification):
def modification(self, profile):
profile.vtypes.update({
'DBGFCOREDESCRIPTOR' : [ 24, {
'u32Magic' : [ 0, ['unsigned int']],
'u32FmtVersion' : [ 4, ['unsigned int']],
'cbSelf' : [ 8, ['unsigned int']],
'u32VBoxVersion' : [ 12, ['unsigned int']],
'u32VBoxRevision' : [ 16, ['unsigned int']],
'cCpus' : [ 20, ['unsigned int']],
}]})
profile.object_classes.update({'DBGFCOREDESCRIPTOR': DBGFCOREDESCRIPTOR})
class VirtualBoxCoreDumpElf64(addrspace.AbstractRunBasedMemory):
""" This AS supports VirtualBox ELF64 coredump format """
order = 30
def __init__(self, base, config, **kwargs):
## We must have an AS below us
self.as_assert(base, "No base Address Space")
addrspace.AbstractRunBasedMemory.__init__(self, base, config, **kwargs)
## Quick test (before instantiating an object)
## for ELF64, little-endian - ELFCLASS64 and ELFDATA2LSB
self.as_assert(base.read(0, 6) == '\x7fELF\x02\x01',
"ELF64 Header signature invalid")
## Base AS should be a file AS
elf = obj.Object("elf64_hdr", offset = 0, vm = base)
## Make sure its a core dump
self.as_assert(str(elf.e_type) == 'ET_CORE',
"ELF64 type is not a Core file")
## Tuple of (physical memory address, file offset, length)
self.runs = []
## The PT_NOTE core descriptor structure
self.header = None
for phdr in elf.program_headers():
## The first note should be the VBCORE segment
if str(phdr.p_type) == 'PT_NOTE':
note = phdr.p_offset.dereference_as("elf64_note")
if note.namesz == 'VBCORE' and note.n_type == NT_VBOXCORE:
self.header = note.cast_descsz("DBGFCOREDESCRIPTOR")
continue
# Only keep load segments with valid file sizes
if (str(phdr.p_type) != 'PT_LOAD' or
phdr.p_filesz == 0 or
phdr.p_filesz != phdr.p_memsz):
continue
self.runs.append((int(phdr.p_paddr),
int(phdr.p_offset),
int(phdr.p_memsz)))
self.as_assert(self.header, 'ELF error: did not find any PT_NOTE segment with VBCORE')
self.as_assert(self.header.u32Magic == DBGFCORE_MAGIC, 'Could not find VBox core magic signature')
self.as_assert(self.header.u32FmtVersion == DBGFCORE_FMT_VERSION, 'Unknown VBox core format version')
self.as_assert(self.runs, 'ELF error: did not find any LOAD segment with main RAM')
|
pyfa-org/Pyfa | refs/heads/master | eos/db/saveddata/miscData.py | 4 | # ===============================================================================
# Copyright (C) 2011 Anton Vorobyov
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, Table, String
from sqlalchemy.orm import mapper
from eos.db import saveddata_meta
from eos.saveddata.miscData import MiscData
miscdata_table = Table("miscdata", saveddata_meta,
Column("fieldName", String, primary_key=True),
Column("fieldValue", String))
mapper(MiscData, miscdata_table)
|
vmanoria/bluemix-hue-filebrowser | refs/heads/master | hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/django/conf/app_template/models.py | 10644 | from django.db import models
# Create your models here.
|
siosio/intellij-community | refs/heads/master | python/testData/formatter/blankLinesAroundFirstMethod.py | 24 | class C1:
# comment 1
# comment 2
def __init__(self):
pass
class C2:
# comment 2
def __init__(self):
pass
class C3:
def __init__(self):
pass
class C4:
"""Docstring."""
# comment 1
# comment 2
def __init__(self):
pass
class C5:
"""Docstring."""
# comment 2
def __init__(self):
pass
class C6:
"""Docstring."""
def __init__(self):
pass
class C7:
attr = 42
# comment 1
# comment 2
def __init__(self):
pass
class C8:
attr = 42
# comment 2
def __init__(self):
pass
class C9:
attr = 42
def __init__(self):
pass
class C10: # comment before statement list
def __init__(self):
pass
|
adviti/melange | refs/heads/master | tests/app/soc/logic/helper/test_timeline.py | 1 | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for soc.logic.helper.timeline.
"""
import unittest
from datetime import datetime
from datetime import timedelta
from soc.logic.helper import timeline
from soc.models import timeline as timeline_model
from soc.models.sponsor import Sponsor
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class TimelineTest(unittest.TestCase):
"""Tests for timeline helper functions.
"""
def setUp(self):
self.timeline = seeder_logic.seed(timeline_model.Timeline)
def testIsBeforePeriod(self):
"""Tests if a correct bool is returned if the current DateTime is before
a given period_start.
"""
#program is yet to start.
self.timeline.program_start = datetime.utcnow() + timedelta(10)
self.assertTrue(timeline.isBeforePeriod(self.timeline, 'program'))
#program has already started.
self.timeline.program_start = datetime.utcnow() - timedelta(10)
self.assertFalse(timeline.isBeforePeriod(self.timeline, 'program'))
#student signup period is yet to start.
self.timeline.student_signup_start = datetime.utcnow() + timedelta(10)
self.assertTrue(timeline.isBeforePeriod(self.timeline, 'student_signup'))
#student sign up period has already started.
self.timeline.student_signup_start = datetime.utcnow() - timedelta(10)
self.assertFalse(timeline.isBeforePeriod(self.timeline, 'student_signup'))
#event not in the timeline.
self.assertFalse(timeline.isBeforePeriod(self.timeline, 'other_event'))
def testIsBeforeEvent(self):
"""Tests if a correct bool is returned if current DateTime
is before a given event.
"""
#program has not started.
self.timeline.program_start = datetime.utcnow() + timedelta(20)
self.assertTrue(timeline.isBeforeEvent(self.timeline, 'program_start'))
#program has already started.
self.timeline.program_start = datetime.utcnow() - timedelta(20)
self.assertFalse(timeline.isBeforeEvent(self.timeline, 'program_start'))
#program has not ended.
self.timeline.program_end = datetime.utcnow() + timedelta(20)
self.assertTrue(timeline.isBeforeEvent(self.timeline, 'program_end'))
#program has ended.
self.timeline.program_end = datetime.utcnow() - timedelta(20)
self.assertFalse(timeline.isBeforeEvent(self.timeline, 'program_end'))
#the deadline to announce accepted organizations has not passed.
self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
+ timedelta(20))
self.assertTrue(timeline.isBeforeEvent(
self.timeline, "accepted_organization_announced_deadline"))
#the deadline to announce accepted organizations has been passed.
self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
- timedelta(20))
self.assertFalse(timeline.isBeforeEvent(
self.timeline, "accepted_organization_announced_deadline"))
#student sign up period has not started.
self.timeline.student_signup_start = datetime.utcnow() + timedelta(20)
self.assertTrue(timeline.isBeforeEvent(self.timeline,
'student_signup_start'))
#student sign up period has already started.
self.timeline.student_signup_start = datetime.utcnow() - timedelta(20)
self.assertFalse(timeline.isBeforeEvent(self.timeline,
'student_signup_start'))
#student sign up period has not ended.
self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
self.assertTrue(timeline.isBeforeEvent(self.timeline,
'student_signup_end'))
#student sign up period has already ended.
self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
self.assertFalse(timeline.isBeforeEvent(self.timeline,
'student_signup_end'))
#event not in the timeline.
self.assertFalse(timeline.isBeforeEvent(self.timeline, 'other_event'))
def testIsActivePeriod(self):
"""Tests if a correct boolean is returned if the current DateTime is
between period_start and period_end.
"""
#program is going on.
self.timeline.program_start = datetime.utcnow() - timedelta(10)
self.timeline.program_end = datetime.utcnow() + timedelta(10)
self.assertTrue(timeline.isActivePeriod(self.timeline, 'program'))
#program will start.
self.timeline.program_start = datetime.utcnow() + timedelta(10)
self.timeline.program_end = datetime.utcnow() + timedelta(20)
self.assertFalse(timeline.isActivePeriod(self.timeline, 'program'))
#program has ended.
self.timeline.program_start = datetime.utcnow() - timedelta(20)
self.timeline.program_end = datetime.utcnow() - timedelta(10)
self.assertFalse(timeline.isActivePeriod(self.timeline, 'program'))
#student sign up period will start
self.timeline.student_signup_start = datetime.utcnow() + timedelta(10)
self.timeline.student_signup_end = datetime.utcnow() + timedelta(30)
self.assertFalse(timeline.isActivePeriod(self.timeline, 'student_signup'))
#student sign up period has not ended.
self.timeline.student_signup_start = datetime.utcnow() - timedelta(10)
self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
self.assertTrue(timeline.isActivePeriod(self.timeline,
'student_signup'))
#student sign up period has already ended.
self.timeline.student_signup_start = datetime.utcnow() - timedelta(30)
self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
self.assertFalse(timeline.isActivePeriod(self.timeline,
'student_signup'))
#event not in the timeline.
self.assertFalse(timeline.isActivePeriod(self.timeline, 'other_event'))
def testActivePeriod(self):
"""Tests if the start and end of a specified period is returned.
"""
start = datetime(2011, 4, 3)
end = datetime(2020, 4, 3)
self.timeline.program_start = start
self.timeline.program_end = end
actual = timeline.activePeriod(self.timeline, 'program')
expected = (start, end)
self.assertEqual(actual, expected)
start = datetime(2011, 7, 4)
end = datetime(2021, 7, 5)
self.timeline.student_signup_start = start
self.timeline.student_signup_end = end
actual = timeline.activePeriod(self.timeline, 'student_signup')
expected = (start, end)
self.assertEqual(actual, expected)
#event not in the timeline.
expected = (None, None)
actual = timeline.activePeriod(self.timeline, 'some_other_event')
self.assertEqual(actual, expected)
def testIsAfterPeriod(self):
"""Tests if True is returned if current DateTime is after period_end.
"""
#program has ended.
self.timeline.program_end = datetime.utcnow() - timedelta(10)
self.assertTrue(timeline.isAfterPeriod(self.timeline, 'program'))
#program has not ended.
self.timeline.program_end = datetime.utcnow() + timedelta(10)
self.assertFalse(timeline.isAfterPeriod(self.timeline, 'program'))
#student sign up has ended.
self.timeline.student_signup_end = datetime.utcnow() - timedelta(10)
self.assertTrue(timeline.isAfterPeriod(self.timeline, 'student_signup'))
#student sign up has not ended.
self.timeline.student_signup_end = datetime.utcnow() + timedelta(10)
self.assertFalse(timeline.isAfterPeriod(self.timeline, 'student_signup'))
#event not in the timeline.
self.assertFalse(timeline.isAfterPeriod(self.timeline, 'some_other_event'))
def testIsAfterEvent(self):
"""Tests if True is returned if current DateTime is after the given event.
"""
#program has started.
self.timeline.program_start = datetime.utcnow() - timedelta(10)
self.assertTrue(timeline.isAfterEvent(self.timeline, 'program_start'))
#program is yet to start.
self.timeline.program_start = datetime.utcnow() + timedelta(10)
self.assertFalse(timeline.isAfterEvent(self.timeline, 'program_start'))
#program has ended.
self.timeline.program_start = datetime.utcnow() - timedelta(30)
self.timeline.program_end = datetime.utcnow() - timedelta(20)
self.assertTrue(timeline.isAfterEvent(self.timeline, 'program_end'))
#the deadline to announce accepted organizations has not passed.
self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
+ timedelta(20))
self.assertFalse(timeline.isAfterEvent(
self.timeline, "accepted_organization_announced_deadline"))
#the deadline to announce accepted organizations has been passed.
self.timeline.accepted_organization_announced_deadline = (datetime.utcnow()
- timedelta(20))
self.assertTrue(timeline.isAfterEvent(
self.timeline, "accepted_organization_announced_deadline"))
#student sign up period has not started.
self.timeline.student_signup_start = datetime.utcnow() + timedelta(20)
self.assertFalse(timeline.isAfterEvent(self.timeline,
'student_signup_start'))
#student sign up period has already started.
self.timeline.student_signup_start = datetime.utcnow() - timedelta(20)
self.assertTrue(timeline.isAfterEvent(self.timeline,
'student_signup_start'))
#student sign up period has not ended.
self.timeline.student_signup_end = datetime.utcnow() + timedelta(20)
self.assertFalse(timeline.isAfterEvent(self.timeline,
'student_signup_end'))
#student sign up period has already ended.
self.timeline.student_signup_end = datetime.utcnow() - timedelta(20)
self.assertTrue(timeline.isAfterEvent(self.timeline,
'student_signup_end'))
#event not in the Timeline.
self.assertFalse(timeline.isAfterEvent(self.timeline, 'some_other_event'))
  def testGetDateTimeByName(self):
"""Tests that a DateTime property with a given name is returned.
"""
self.timeline.program_start = datetime(2011, 7, 1)
#name is available in the timeline.
name = 'program_start'
entity = self.timeline
expected = self.timeline.program_start
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(actual, expected)
self.timeline.program_end = datetime(2012, 7, 4)
name = 'program_end'
entity = self.timeline
expected = self.timeline.program_end
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(actual, expected)
self.timeline.student_signup_start = datetime(2011, 9, 5)
name = 'student_signup_start'
entity = self.timeline
expected = self.timeline.student_signup_start
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(actual, expected)
self.timeline.student_signup_end = datetime(2011, 12, 4)
name = 'student_signup_end'
entity = self.timeline
expected = self.timeline.student_signup_end
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(actual, expected)
self.timeline.accepted_organization_announced_deadline = datetime(2011, 5, 4)
    name = 'accepted_organization_announced_deadline'
entity = self.timeline
expected = self.timeline.accepted_organization_announced_deadline
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(actual, expected)
#name is not available in the timeline.
name = 'some_name'
entity = self.timeline
expected = None
actual = timeline.getDateTimeByName(entity, name)
self.assertEqual(expected, actual)
|
sivel/ansible | refs/heads/devel | lib/ansible/parsing/yaml/__init__.py | 2520 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
zzqcn/wireshark | refs/heads/zzqcn | tools/generate-sysdig-event.py | 2 | #!/usr/bin/env python3
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''\
Generate Sysdig event dissector sections from the sysdig sources.
Reads driver/event_table.c and driver/ppm_events_public.h and generates
corresponding dissection code in packet-sysdig-event.c. Updates are
performed in-place in the dissector code.
Requires an Internet connection. Assets are loaded from GitHub over HTTPS.
'''
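# Rough sketch of the in-place update (a summary of the code below, not an
# authoritative spec): the dissector file is read, previously generated lines
# are stripped out, and fresh content is re-emitted beneath marker comments
# such as "/* Header fields */" and "/* Event definitions */".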
import logging
import os
import os.path
import re
import urllib.request, urllib.error, urllib.parse
import sys
sysdig_repo_pfx = 'https://raw.githubusercontent.com/draios/sysdig/0.26.1/'
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def get_url_lines(url):
'''Open a URL.
Returns the URL body as a list of lines.
'''
req_headers = { 'User-Agent': 'Wireshark generate-sysdig-event' }
try:
req = urllib.request.Request(url, headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode().splitlines()
response.close()
except urllib.error.HTTPError as err:
exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason))
except urllib.error.URLError as err:
exit_msg("URL error fetching {0}: {1}".format(url, err.reason))
except OSError as err:
exit_msg("OS error fetching {0}".format(url, err.strerror))
except Exception:
exit_msg("Unexpected error:", sys.exc_info()[0])
return lines
ppm_ev_pub_lines = get_url_lines(sysdig_repo_pfx + 'driver/ppm_events_public.h')
ppme_re = re.compile(r'^\s+PPME_([A-Z0-9_]+_[EX])\s*=\s*([0-9]+)\s*,')
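# Illustrative only: the enum lines this pattern targets look roughly like
#     PPME_SYSCALL_OPEN_E = 2,
# (the numeric value shown is an assumption), giving groups
# ('SYSCALL_OPEN_E', '2').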
event_info_d = {}
def get_event_defines():
event_d = {}
for line in ppm_ev_pub_lines:
m = ppme_re.match(line)
if m:
event_d[int(m.group(2))] = m.group(1)
return event_d
ppm_ev_table_lines = get_url_lines(sysdig_repo_pfx + 'driver/event_table.c')
hf_d = {}
event_info_re = re.compile(r'^\s+/\*\s*PPME_.*\*\/\s*{\s*"([A-Za-z0-9_]+)"\s*,[^,]+,[^,]+,\s*([0-9]+)\s*[,{}]')
event_param_re = re.compile(r'{\s*"([A-Za-z0-9_]+)"\s*,\s*PT_([A-Z0-9_]+)\s*,\s*PF_([A-Z0-9_]+)\s*[,}]')
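# Illustrative only: an event_table.c entry has roughly this shape (the
# EC_/EF_ fields are assumptions inferred from the regexes, not quoted from
# the sysdig sources):
#   /* PPME_SYSCALL_OPEN_E */{"open", EC_FILE, EF_CREATES_FD, 3, {{"name", PT_FSPATH, PF_NA}, ...} },
# event_info_re captures ('open', '3'); event_param_re.findall() then yields
# one (name, type, format) tuple per parameter, e.g. ('name', 'FSPATH', 'NA').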
def get_event_names():
'''Return a contiguous list of event names. Names are lower case.'''
event_name_l = []
for line in ppm_ev_table_lines:
ei = event_info_re.match(line)
if ei:
event_name_l.append(ei.group(1))
return event_name_l
# PT_xxx to FT_xxx
pt_to_ft = {
'BYTEBUF': 'BYTES',
'CHARBUF': 'STRING',
'FD': 'INT64',
'FSPATH': 'STRING',
}
# FT_xxx to BASE_xxx
force_param_formats = {
'STRING': 'NONE',
'INT.*': 'DEC',
}
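# Illustrative only: combining the two tables above, a PT_FSPATH parameter is
# mapped to FT_STRING, and any FT_STRING field is then forced to BASE_NONE by
# force_param_formats.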
def get_event_params():
'''Return a list of dictionaries containing event names and parameter info.'''
event_param_l = []
event_num = 0
force_string_l = ['args', 'env']
for line in ppm_ev_table_lines:
ei = event_info_re.match(line)
ep = event_param_re.findall(line)
if ei and ep:
event_name = ei.group(1)
src_param_count = int(ei.group(2))
if len(ep) != src_param_count:
err_msg = '{}: found {} parameters. Expected {}. Params: {}'.format(
ei.group(1), len(ep), src_param_count, repr(ep))
if len(ep) > src_param_count:
logging.warning(err_msg)
del ep[src_param_count:]
else:
raise NameError(err_msg)
for p in ep:
if p[0] in force_string_l:
param_type = 'STRING'
elif p[1] in pt_to_ft:
param_type = pt_to_ft[p[1]]
elif p[0] == 'flags' and p[1].startswith('INT') and 'HEX' in p[2]:
param_type = 'U' + p[1]
elif 'INT' in p[1]:
# Ints
param_type = p[1]
else:
# Fall back to bytes
param_type = 'BYTES'
if p[2] == 'NA':
if 'INT' in param_type:
param_format = 'DEC'
else:
param_format = 'NONE'
elif param_type == 'BYTES':
param_format = 'NONE'
else:
param_format = p[2]
for pt_pat, force_pf in force_param_formats.items():
if re.match(pt_pat, param_type) and param_format != force_pf:
err_msg = 'Forcing {} {} format to {}. Params: {}'.format(
event_name, param_type, force_pf, repr(ep))
logging.warning(err_msg)
param_format = force_pf
param_d = {
'event_name': event_name,
'event_num': event_num,
'param_name': p[0],
'param_type': param_type,
'param_format': param_format,
}
event_param_l.append(param_d)
if ei:
event_num += 1
return event_param_l
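# Illustrative only: a single entry produced above might look like
#   {'event_name': 'open', 'event_num': 2, 'param_name': 'name',
#    'param_type': 'STRING', 'param_format': 'NONE'}
# (the event number is an assumption; the type/format follow from pt_to_ft
# and force_param_formats).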
def param_to_hf_name(param):
return 'hf_param_{}_{}'.format(param['param_name'], param['param_type'].lower())
def param_to_value_string_name(param):
return '{}_{}_vals'.format(param['param_name'], param['param_type'].lower())
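# Illustrative only: for a param dict with param_name 'fd' and param_type
# 'INT64', the two helpers above return 'hf_param_fd_int64' and
# 'fd_int64_vals' respectively.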
def get_param_desc(param):
# Try to coerce event names and parameters into human-friendly
# strings.
# XXX This could use some work.
# Specific descriptions. Event name + parameter name.
param_descs = {
'accept.queuepct': 'Accept queue per connection',
'execve.args': 'Program arguments',
'execve.comm': 'Command',
'execve.cwd': 'Current working directory',
}
# General descriptions. Event name only.
event_descs = {
'ioctl': 'I/O control',
}
event_name = param['event_name']
param_id = '{}.{}'.format(event_name, param['param_name'])
if param_id in param_descs:
param_desc = param_descs[param_id]
elif event_name in event_descs:
param_desc = '{}: {}'.format(event_descs[event_name], param['param_name'])
else:
param_desc = param['param_name']
return param_desc
def main():
logging.basicConfig(format='%(levelname)s: %(message)s')
# Event list
event_d = get_event_defines()
event_nums = list(event_d.keys())
event_nums.sort()
event_name_l = get_event_names()
event_param_l = get_event_params()
hf_d = {}
for param in event_param_l:
hf_name = param_to_hf_name(param)
hf_d[hf_name] = param
idx_id_to_name = { '': 'no' }
parameter_index_l = []
    for en in range(len(event_nums)):
param_id = ''
param_l = []
event_var = event_d[en].lower()
for param in event_param_l:
if param['event_num'] == en:
hf_name = param_to_hf_name(param)
param_l.append(hf_name)
param_id += ':' + param['param_name'] + '_' + param['param_type']
ei_str = ''
if param_id not in idx_id_to_name:
idx_id_to_name[param_id] = event_var
ei_str = 'static int * const {}_indexes[] = {{ &{}, NULL }};'.format(
event_var,
', &'.join(param_l)
)
else:
ei_str = '#define {}_indexes {}_indexes'.format(event_var, idx_id_to_name[param_id])
parameter_index_l.append(ei_str)
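    # Illustrative only: the loop above emits initializers such as
    #   static int * const syscall_open_e_indexes[] = { &hf_param_name_string, NULL };
    # and, for an event whose parameter list was already seen,
    #   #define syscall_open_x_indexes syscall_open_e_indexes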
dissector_path = os.path.join(os.path.dirname(__file__),
'..', 'epan', 'dissectors', 'packet-sysdig-event.c')
    with open(dissector_path, 'r') as dissector_f:
        dissector_lines = list(dissector_f)
    dissector_f = open(dissector_path, 'w+')
# Strip out old content
strip_re_l = []
    strip_re_l.append(re.compile(r'^static\s+int\s+hf_param_.*;'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_STR_[A-Z0-9_]+\s+"[A-Za-z0-9_]+"'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_[A-Z0-9_]+\s+[0-9]+'))
    strip_re_l.append(re.compile(r'^\s*{\s*EVT_[A-Z0-9_]+\s*,\s*EVT_STR_[A-Z0-9_]+\s*}'))
    strip_re_l.append(re.compile(r'^static\s+const\s+int\s+\*\s*[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^static\s+int\s*\*\s+const\s+[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^\s*#define\s+[a-z0-9_]+_[ex]_indexes\s+[a-z0-9_]+_indexes'))
    strip_re_l.append(re.compile(r'^\s*\{\s*EVT_[A-Z0-9_]+_[EX]\s*,\s*[a-z0-9_]+_[ex]_indexes\s*}\s*,'))
    strip_re_l.append(re.compile(r'^\s*{\s*&hf_param_.*},'))  # Must all be on one line
for strip_re in strip_re_l:
dissector_lines = [l for l in dissector_lines if not strip_re.search(l)]
# Find our value strings
    value_string_re = re.compile(r'static\s+const\s+value_string\s+([A-Za-z0-9_]+_vals)')
value_string_l = []
for line in dissector_lines:
vs = value_string_re.match(line)
if vs:
value_string_l.append(vs.group(1))
# Add in new content after comments.
header_fields_c = 'Header fields'
    header_fields_re = re.compile(r'/\*\s+' + header_fields_c, flags=re.IGNORECASE)
header_fields_l = []
for hf_name in sorted(hf_d.keys()):
header_fields_l.append('static int {} = -1;'.format(hf_name))
event_names_c = 'Event names'
    event_names_re = re.compile(r'/\*\s+' + event_names_c, flags=re.IGNORECASE)
event_names_l = []
event_str_l = list(set(event_name_l))
event_str_l.sort()
for evt_str in event_str_l:
event_names_l.append('#define EVT_STR_{0:24s} "{1:s}"'.format(evt_str.upper(), evt_str))
event_definitions_c = 'Event definitions'
    event_definitions_re = re.compile(r'/\*\s+' + event_definitions_c, flags=re.IGNORECASE)
event_definitions_l = []
for evt in event_nums:
event_definitions_l.append('#define EVT_{0:24s} {1:3d}'.format(event_d[evt], evt))
value_strings_c = 'Value strings'
    value_strings_re = re.compile(r'/\*\s+' + value_strings_c, flags=re.IGNORECASE)
value_strings_l = []
for evt in event_nums:
evt_num = 'EVT_{},'.format(event_d[evt])
evt_str = 'EVT_STR_' + event_name_l[evt].upper()
value_strings_l.append(' {{ {0:<32s} {1:s} }},'.format(evt_num, evt_str))
parameter_index_c = 'Parameter indexes'
    parameter_index_re = re.compile(r'/\*\s+' + parameter_index_c, flags=re.IGNORECASE)
# parameter_index_l defined above.
event_tree_c = 'Event tree'
    event_tree_re = re.compile(r'/\*\s+' + event_tree_c, flags=re.IGNORECASE)
event_tree_l = []
for evt in event_nums:
evt_num = 'EVT_{}'.format(event_d[evt])
evt_idx = '{}_indexes'.format(event_d[evt].lower())
event_tree_l.append(' {{ {}, {} }},'.format(evt_num, evt_idx))
header_field_reg_c = 'Header field registration'
    header_field_reg_re = re.compile(r'/\*\s+' + header_field_reg_c, flags=re.IGNORECASE)
header_field_reg_l = []
for hf_name in sorted(hf_d.keys()):
param = hf_d[hf_name]
event_name = param['event_name']
param_desc = get_param_desc(param)
param_name = param['param_name']
param_type = param['param_type']
param_format = param['param_format']
fieldconvert = 'NULL'
vs_name = param_to_value_string_name(param)
if vs_name in value_string_l and 'INT' in param_type:
fieldconvert = 'VALS({})'.format(vs_name)
header_field_reg_l.append(' {{ &{}, {{ "{}", "sysdig.param.{}.{}", FT_{}, BASE_{}, {}, 0, NULL, HFILL }} }},'.format(
hf_name,
param_desc,
event_name,
param_name,
param_type,
param_format,
fieldconvert
))
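    # Illustrative only: the format string above produces entries such as
    #   { &hf_param_name_string, { "name", "sysdig.param.open.name", FT_STRING, BASE_NONE, NULL, 0, NULL, HFILL } },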
for line in dissector_lines:
fill_comment = None
fill_l = []
if header_fields_re.match(line):
fill_comment = header_fields_c
fill_l = header_fields_l
elif event_names_re.match(line):
fill_comment = event_names_c
fill_l = event_names_l
elif event_definitions_re.match(line):
fill_comment = event_definitions_c
fill_l = event_definitions_l
elif value_strings_re.match(line):
fill_comment = value_strings_c
fill_l = value_strings_l
elif parameter_index_re.match(line):
fill_comment = parameter_index_c
fill_l = parameter_index_l
elif event_tree_re.match(line):
fill_comment = event_tree_c
fill_l = event_tree_l
elif header_field_reg_re.match(line):
fill_comment = header_field_reg_c
fill_l = header_field_reg_l
if fill_comment is not None:
# Write our comment followed by the content
            print('Generating {}, {:d} lines'.format(fill_comment, len(fill_l)))
dissector_f.write('/* {}. Automatically generated by tools/{} */\n'.format(
fill_comment,
os.path.basename(__file__)
))
for line in fill_l:
dissector_f.write('{}\n'.format(line))
# Fill each section only once
del fill_l[:]
else:
# Existing content
dissector_f.write(line)
dissector_f.close()
#
# On with the show
#
if __name__ == "__main__":
sys.exit(main())
|
balloob/home-assistant | refs/heads/dev | tests/components/asuswrt/test_device_tracker.py | 13 | """The tests for the ASUSWRT device tracker platform."""
from homeassistant.components.asuswrt import (
CONF_DNSMASQ,
CONF_INTERFACE,
DATA_ASUSWRT,
DOMAIN,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
async def test_password_or_pub_key_required(hass):
"""Test creating an AsusWRT scanner without a pass or pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_HOST: "fake_host", CONF_USERNAME: "fake_user"}}
)
assert not result
async def test_network_unreachable(hass):
"""Test creating an AsusWRT scanner without a pass or pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock(side_effect=OSError)
AsusWrt().is_connected = False
result = await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_HOST: "fake_host", CONF_USERNAME: "fake_user"}}
)
assert result
assert hass.data.get(DATA_ASUSWRT) is None
async def test_get_scanner_with_password_no_pubkey(hass):
"""Test creating an AsusWRT scanner with a password and no pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().connection.async_get_connected_devices = AsyncMock(return_value={})
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
}
},
)
assert result
assert hass.data[DATA_ASUSWRT] is not None
async def test_specify_non_directory_path_for_dnsmasq(hass):
"""Test creating an AsusWRT scanner with a dnsmasq location which is not a valid directory."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: 1234,
}
},
)
assert not result
async def test_interface(hass):
"""Test creating an AsusWRT scanner using interface eth1."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().connection.async_get_connected_devices = AsyncMock(return_value={})
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
CONF_INTERFACE: "eth1",
}
},
)
assert result
assert hass.data[DATA_ASUSWRT] is not None
async def test_no_interface(hass):
"""Test creating an AsusWRT scanner using no interface."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
CONF_INTERFACE: None,
}
},
)
assert not result
|
UrLab/incubator | refs/heads/main | events/tests/test_forms.py | 1 | from events.forms import EventForm
from datetime import datetime
import pytest
from users.models import User
@pytest.fixture(scope='function')
def user():
user = User.objects.create(username="test", email="test@test.be", first_name="Test", last_name="Test")
return user.id
@pytest.mark.django_db
def test_only_title_and_state_required(user):
form_data = {
'title': 'wtf',
'status': 'i',
'organizer': user,
}
form = EventForm(data=form_data)
assert form.is_valid(), form.errors
@pytest.mark.django_db
def test_no_stop_but_start(user):
form_data = {
'title': 'wtf',
'status': 'i',
'start': datetime(2000, 1, 1),
'organizer': user,
}
form = EventForm(data=form_data)
assert form.is_valid(), form.errors
assert form.cleaned_data['start'] == form.cleaned_data['stop']
assert form.cleaned_data['start'].year == 2000
def test_ready_must_have_date():
form_data = {
'title': 'wtf',
'status': 'r',
}
form = EventForm(data=form_data)
assert not form.is_valid(), form.errors
assert 'Un événement prêt doit avoir une date de début' in form.errors['__all__']
def test_stop_must_be_after_start():
form_data = {
'title': 'wtf',
'status': 'i',
'start': datetime(2100, 1, 1),
'stop': datetime(2000, 1, 1)
}
form = EventForm(data=form_data)
assert not form.is_valid()
assert 'La date de fin ne peut être avant la date de début' in form.errors['__all__']
|
fedora-conary/conary | refs/heads/master | scripts/migration/tablelist.py | 1 | #!/usr/bin/env python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# list of tables we migrate, in order
TableList = [
'LatestMirror',
'CommitLock',
'Branches',
'Items',
'Versions',
'Labels',
'LabelMap',
'Flavors',
'FlavorMap',
'FlavorScores',
'Users',
'UserGroups',
'UserGroupMembers',
'EntitlementGroups',
'Entitlements',
'EntitlementOwners',
'EntitlementAccessMap',
'Permissions',
'FileStreams',
'Nodes',
'ChangeLogs',
'Instances',
'TroveInfo',
'Dependencies',
'Metadata',
'MetadataItems',
'PGPKeys',
'PGPFingerprints',
'Provides',
'Requires',
'TroveRedirects',
'TroveTroves',
'Dirnames',
'Basenames',
'Prefixes',
'FilePaths',
'TroveFiles',
'CheckTroveCache',
'UserGroupTroves',
'UserGroupInstancesCache',
'UserGroupAllPermissions',
'UserGroupAllTroves',
'LatestCache'
]
|
shakamunyi/neutron-vrrp | refs/heads/master | neutron/tests/unit/sriovnicagent/test_sriov_neutron_agent.py | 16 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent import sriov_nic_agent
from neutron.tests import base
DEVICE_MAC = '11:22:33:44:55:66'
class TestSriovAgent(base.BaseTestCase):
def setUp(self):
super(TestSriovAgent, self).setUp()
# disable setting up periodic state reporting
cfg.CONF.set_override('report_interval', 0, 'AGENT')
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_default('enable_security_group',
False,
group='SECURITYGROUP')
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
        mock.patch('neutron.openstack.common.loopingcall.'
                   'FixedIntervalLoopingCall',
                   new=MockFixedIntervalLoopingCall).start()
self.agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, None)
def test_treat_devices_removed_with_existed_device(self):
agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, None)
devices = [DEVICE_MAC]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd:
fn_udd.return_value = {'device': DEVICE_MAC,
'exists': True}
with mock.patch.object(sriov_nic_agent.LOG,
'info') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(2, log.call_count)
self.assertFalse(resync)
self.assertTrue(fn_udd.called)
def test_treat_devices_removed_with_not_existed_device(self):
agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, None)
devices = [DEVICE_MAC]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd:
fn_udd.return_value = {'device': DEVICE_MAC,
'exists': False}
with mock.patch.object(sriov_nic_agent.LOG,
'debug') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(1, log.call_count)
self.assertFalse(resync)
self.assertTrue(fn_udd.called)
def test_treat_devices_removed_failed(self):
agent = sriov_nic_agent.SriovNicSwitchAgent({}, {}, 0, None)
devices = [DEVICE_MAC]
with mock.patch.object(agent.plugin_rpc,
"update_device_down") as fn_udd:
fn_udd.side_effect = Exception()
with mock.patch.object(sriov_nic_agent.LOG,
'debug') as log:
resync = agent.treat_devices_removed(devices)
self.assertEqual(1, log.call_count)
self.assertTrue(resync)
self.assertTrue(fn_udd.called)
def mock_scan_devices(self, expected, mock_current,
registered_devices, updated_devices):
self.agent.eswitch_mgr = mock.Mock()
self.agent.eswitch_mgr.get_assigned_devices.return_value = mock_current
results = self.agent.scan_devices(registered_devices, updated_devices)
self.assertEqual(expected, results)
def test_scan_devices_returns_empty_sets(self):
registered = set()
updated = set()
mock_current = set()
expected = {'current': set(),
'updated': set(),
'added': set(),
'removed': set()}
self.mock_scan_devices(expected, mock_current, registered, updated)
def test_scan_devices_no_changes(self):
registered = set(['1', '2'])
updated = set()
mock_current = set(['1', '2'])
expected = {'current': set(['1', '2']),
'updated': set(),
'added': set(),
'removed': set()}
self.mock_scan_devices(expected, mock_current, registered, updated)
def test_scan_devices_new_and_removed(self):
registered = set(['1', '2'])
updated = set()
mock_current = set(['2', '3'])
expected = {'current': set(['2', '3']),
'updated': set(),
'added': set(['3']),
'removed': set(['1'])}
self.mock_scan_devices(expected, mock_current, registered, updated)
def test_scan_devices_new_updates(self):
registered = set(['1'])
updated = set(['2'])
mock_current = set(['1', '2'])
expected = {'current': set(['1', '2']),
'updated': set(['2']),
'added': set(['2']),
'removed': set()}
self.mock_scan_devices(expected, mock_current, registered, updated)
def test_scan_devices_updated_missing(self):
registered = set(['1'])
updated = set(['2'])
mock_current = set(['1'])
expected = {'current': set(['1']),
'updated': set(),
'added': set(),
'removed': set()}
self.mock_scan_devices(expected, mock_current, registered, updated)
def test_process_network_devices(self):
agent = self.agent
device_info = {'current': set(),
'added': set(['mac3', 'mac4']),
'updated': set(['mac2', 'mac3']),
'removed': set(['mac1'])}
agent.prepare_devices_filter = mock.Mock()
agent.refresh_firewall = mock.Mock()
agent.treat_devices_added_updated = mock.Mock(return_value=False)
agent.treat_devices_removed = mock.Mock(return_value=False)
agent.process_network_devices(device_info)
agent.prepare_devices_filter.assert_called_with(set(['mac3', 'mac4']))
self.assertTrue(agent.refresh_firewall.called)
agent.treat_devices_added_updated.assert_called_with(set(['mac2',
'mac3',
'mac4']))
agent.treat_devices_removed.assert_called_with(set(['mac1']))
def test_treat_devices_added_updated_admin_state_up_true(self):
agent = self.agent
mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
'port_id': 'port123',
'network_id': 'net123',
'admin_state_up': True,
'network_type': 'vlan',
'segmentation_id': 100,
'profile': {'pci_slot': '1:2:3.0'},
'physical_network': 'physnet1'}
agent.plugin_rpc = mock.Mock()
agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
agent.eswitch_mgr = mock.Mock()
agent.eswitch_mgr.device_exists.return_value = True
agent.set_device_state = mock.Mock()
resync_needed = agent.treat_devices_added_updated(
set(['aa:bb:cc:dd:ee:ff']))
self.assertFalse(resync_needed)
agent.eswitch_mgr.device_exists.assert_called_with('aa:bb:cc:dd:ee:ff',
'1:2:3.0')
agent.eswitch_mgr.set_device_state.assert_called_with(
'aa:bb:cc:dd:ee:ff',
'1:2:3.0',
True)
self.assertTrue(agent.plugin_rpc.update_device_up.called)
def test_treat_devices_added_updated_admin_state_up_false(self):
agent = self.agent
mock_details = {'device': 'aa:bb:cc:dd:ee:ff',
'port_id': 'port123',
'network_id': 'net123',
'admin_state_up': False,
'network_type': 'vlan',
'segmentation_id': 100,
'profile': {'pci_slot': '1:2:3.0'},
'physical_network': 'physnet1'}
agent.plugin_rpc = mock.Mock()
agent.plugin_rpc.get_devices_details_list.return_value = [mock_details]
agent.remove_port_binding = mock.Mock()
resync_needed = agent.treat_devices_added_updated(
set(['aa:bb:cc:dd:ee:ff']))
self.assertFalse(resync_needed)
self.assertFalse(agent.plugin_rpc.update_device_up.called)
|
waseem18/oh-mainline | refs/heads/master | vendor/packages/docutils/test/test_parsers/test_rst/test_directives/test_unknown.py | 19 | #! /usr/bin/env python
# $Id: test_unknown.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Tests for unknown directives.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['unknown'] = [
["""\
.. reStructuredText-unknown-directive::
.. reStructuredText-unknown-directive:: argument
.. reStructuredText-unknown-directive::
block
""",
"""\
<document source="test data">
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive::
<system_message level="1" line="3" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="3" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive:: argument
<system_message level="1" line="5" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="5" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive::
block
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
pressbooks/trellis | refs/heads/master | lib/trellis/utils/__init__.py | 673 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
cournape/numscons | refs/heads/master | numscons/scons-local/scons-local-1.2.0/SCons/Tool/packaging/zip.py | 1 | """SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/zip.py 2009/09/04 16:33:07 david"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source)
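# Illustrative usage (an assumption, not taken from the SCons docs):
# selecting PACKAGETYPE='zip' on env.Package() is expected to dispatch here,
# e.g.
#   env.Package(NAME='foo', VERSION='1.0', PACKAGETYPE='zip')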
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bckwltn/SickRage | refs/heads/master | lib/unidecode/x096.py | 252 | data = (
'Fa ', # 0x00
'Ge ', # 0x01
'He ', # 0x02
'Kun ', # 0x03
'Jiu ', # 0x04
'Yue ', # 0x05
'Lang ', # 0x06
'Du ', # 0x07
'Yu ', # 0x08
'Yan ', # 0x09
'Chang ', # 0x0a
'Xi ', # 0x0b
'Wen ', # 0x0c
'Hun ', # 0x0d
'Yan ', # 0x0e
'E ', # 0x0f
'Chan ', # 0x10
'Lan ', # 0x11
'Qu ', # 0x12
'Hui ', # 0x13
'Kuo ', # 0x14
'Que ', # 0x15
'Ge ', # 0x16
'Tian ', # 0x17
'Ta ', # 0x18
'Que ', # 0x19
'Kan ', # 0x1a
'Huan ', # 0x1b
'Fu ', # 0x1c
'Fu ', # 0x1d
'Le ', # 0x1e
'Dui ', # 0x1f
'Xin ', # 0x20
'Qian ', # 0x21
'Wu ', # 0x22
'Yi ', # 0x23
'Tuo ', # 0x24
'Yin ', # 0x25
'Yang ', # 0x26
'Dou ', # 0x27
'E ', # 0x28
'Sheng ', # 0x29
'Ban ', # 0x2a
'Pei ', # 0x2b
'Keng ', # 0x2c
'Yun ', # 0x2d
'Ruan ', # 0x2e
'Zhi ', # 0x2f
'Pi ', # 0x30
'Jing ', # 0x31
'Fang ', # 0x32
'Yang ', # 0x33
'Yin ', # 0x34
'Zhen ', # 0x35
'Jie ', # 0x36
'Cheng ', # 0x37
'E ', # 0x38
'Qu ', # 0x39
'Di ', # 0x3a
'Zu ', # 0x3b
'Zuo ', # 0x3c
'Dian ', # 0x3d
'Ling ', # 0x3e
'A ', # 0x3f
'Tuo ', # 0x40
'Tuo ', # 0x41
'Po ', # 0x42
'Bing ', # 0x43
'Fu ', # 0x44
'Ji ', # 0x45
'Lu ', # 0x46
'Long ', # 0x47
'Chen ', # 0x48
'Xing ', # 0x49
'Duo ', # 0x4a
'Lou ', # 0x4b
'Mo ', # 0x4c
'Jiang ', # 0x4d
'Shu ', # 0x4e
'Duo ', # 0x4f
'Xian ', # 0x50
'Er ', # 0x51
'Gui ', # 0x52
'Yu ', # 0x53
'Gai ', # 0x54
'Shan ', # 0x55
'Xun ', # 0x56
'Qiao ', # 0x57
'Xing ', # 0x58
'Chun ', # 0x59
'Fu ', # 0x5a
'Bi ', # 0x5b
'Xia ', # 0x5c
'Shan ', # 0x5d
'Sheng ', # 0x5e
'Zhi ', # 0x5f
'Pu ', # 0x60
'Dou ', # 0x61
'Yuan ', # 0x62
'Zhen ', # 0x63
'Chu ', # 0x64
'Xian ', # 0x65
'Tou ', # 0x66
'Nie ', # 0x67
'Yun ', # 0x68
'Xian ', # 0x69
'Pei ', # 0x6a
'Pei ', # 0x6b
'Zou ', # 0x6c
'Yi ', # 0x6d
'Dui ', # 0x6e
'Lun ', # 0x6f
'Yin ', # 0x70
'Ju ', # 0x71
'Chui ', # 0x72
'Chen ', # 0x73
'Pi ', # 0x74
'Ling ', # 0x75
'Tao ', # 0x76
'Xian ', # 0x77
'Lu ', # 0x78
'Sheng ', # 0x79
'Xian ', # 0x7a
'Yin ', # 0x7b
'Zhu ', # 0x7c
'Yang ', # 0x7d
'Reng ', # 0x7e
'Shan ', # 0x7f
'Chong ', # 0x80
'Yan ', # 0x81
'Yin ', # 0x82
'Yu ', # 0x83
'Ti ', # 0x84
'Yu ', # 0x85
'Long ', # 0x86
'Wei ', # 0x87
'Wei ', # 0x88
'Nie ', # 0x89
'Dui ', # 0x8a
'Sui ', # 0x8b
'An ', # 0x8c
'Huang ', # 0x8d
'Jie ', # 0x8e
'Sui ', # 0x8f
'Yin ', # 0x90
'Gai ', # 0x91
'Yan ', # 0x92
'Hui ', # 0x93
'Ge ', # 0x94
'Yun ', # 0x95
'Wu ', # 0x96
'Wei ', # 0x97
'Ai ', # 0x98
'Xi ', # 0x99
'Tang ', # 0x9a
'Ji ', # 0x9b
'Zhang ', # 0x9c
'Dao ', # 0x9d
'Ao ', # 0x9e
'Xi ', # 0x9f
'Yin ', # 0xa0
'[?] ', # 0xa1
'Rao ', # 0xa2
'Lin ', # 0xa3
'Tui ', # 0xa4
'Deng ', # 0xa5
'Pi ', # 0xa6
'Sui ', # 0xa7
'Sui ', # 0xa8
'Yu ', # 0xa9
'Xian ', # 0xaa
'Fen ', # 0xab
'Ni ', # 0xac
'Er ', # 0xad
'Ji ', # 0xae
'Dao ', # 0xaf
'Xi ', # 0xb0
'Yin ', # 0xb1
'E ', # 0xb2
'Hui ', # 0xb3
'Long ', # 0xb4
'Xi ', # 0xb5
'Li ', # 0xb6
'Li ', # 0xb7
'Li ', # 0xb8
'Zhui ', # 0xb9
'He ', # 0xba
'Zhi ', # 0xbb
'Zhun ', # 0xbc
'Jun ', # 0xbd
'Nan ', # 0xbe
'Yi ', # 0xbf
'Que ', # 0xc0
'Yan ', # 0xc1
'Qian ', # 0xc2
'Ya ', # 0xc3
'Xiong ', # 0xc4
'Ya ', # 0xc5
'Ji ', # 0xc6
'Gu ', # 0xc7
'Huan ', # 0xc8
'Zhi ', # 0xc9
'Gou ', # 0xca
'Jun ', # 0xcb
'Ci ', # 0xcc
'Yong ', # 0xcd
'Ju ', # 0xce
'Chu ', # 0xcf
'Hu ', # 0xd0
'Za ', # 0xd1
'Luo ', # 0xd2
'Yu ', # 0xd3
'Chou ', # 0xd4
'Diao ', # 0xd5
'Sui ', # 0xd6
'Han ', # 0xd7
'Huo ', # 0xd8
'Shuang ', # 0xd9
'Guan ', # 0xda
'Chu ', # 0xdb
'Za ', # 0xdc
'Yong ', # 0xdd
'Ji ', # 0xde
'Xi ', # 0xdf
'Chou ', # 0xe0
'Liu ', # 0xe1
'Li ', # 0xe2
'Nan ', # 0xe3
'Xue ', # 0xe4
'Za ', # 0xe5
'Ji ', # 0xe6
'Ji ', # 0xe7
'Yu ', # 0xe8
'Yu ', # 0xe9
'Xue ', # 0xea
'Na ', # 0xeb
'Fou ', # 0xec
'Se ', # 0xed
'Mu ', # 0xee
'Wen ', # 0xef
'Fen ', # 0xf0
'Pang ', # 0xf1
'Yun ', # 0xf2
'Li ', # 0xf3
'Li ', # 0xf4
'Ang ', # 0xf5
'Ling ', # 0xf6
'Lei ', # 0xf7
'An ', # 0xf8
'Bao ', # 0xf9
'Meng ', # 0xfa
'Dian ', # 0xfb
'Dang ', # 0xfc
'Xing ', # 0xfd
'Wu ', # 0xfe
'Zhao ', # 0xff
)
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/tangible/ship/crafted/capacitor/shared_energy_saver_battery_mk2.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/crafted/capacitor/shared_energy_saver_battery_mk2.iff"
result.attribute_template_id = 8
result.stfName("space_crafting_n","energy_saver_battery_mk2")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
    return result
|