| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
nielsvanoch/django
|
refs/heads/master
|
tests/custom_methods/tests.py
|
228
|
from __future__ import unicode_literals
from datetime import date
from django.test import TestCase
from .models import Article
class MethodsTests(TestCase):
def test_custom_methods(self):
a = Article.objects.create(
headline="Area man programs in Python", pub_date=date(2005, 7, 27)
)
b = Article.objects.create(
headline="Beatles reunite", pub_date=date(2005, 7, 27)
)
self.assertFalse(a.was_published_today())
self.assertQuerysetEqual(
a.articles_from_same_day_1(), [
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
a.articles_from_same_day_2(), [
"Beatles reunite",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
b.articles_from_same_day_1(), [
"Area man programs in Python",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
b.articles_from_same_day_2(), [
"Area man programs in Python",
],
lambda a: a.headline
)
|
aldariz/Sick-Beard
|
refs/heads/torrent_1080_subtitles
|
lib/requests/packages/chardet/langthaimodel.py
|
2929
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
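# --- Illustrative usage sketch (not part of the original chardet source) ---
# The tables above drive single-byte charset detection: TIS620CharToOrderMap
# maps each byte to a frequency "order", and ThaiLangModel rates how common
# each pair of consecutive orders is (3 = very common, 0 = rare).  The helper
# below is a minimal, hedged sketch of that idea; SAMPLE_SIZE (64) and the
# scoring rule follow the usual chardet layout but are assumptions here, not
# a drop-in replacement for the real prober.
if __name__ == '__main__':
    SAMPLE_SIZE = 64  # only the 64 most frequent characters take part in scoring

    def sequence_likelihood(data):
        """Return the fraction of adjacent byte pairs rated 'very common' (3)."""
        prev_order = 255
        positive = total = 0
        for byte in bytearray(data):
            order = TIS620CharToOrderMap[byte]
            if order < SAMPLE_SIZE and prev_order < SAMPLE_SIZE:
                total += 1
                if ThaiLangModel[prev_order * SAMPLE_SIZE + order] == 3:
                    positive += 1
            prev_order = order
        return float(positive) / total if total else 0.0

    # Thai text encoded as TIS-620 should score noticeably higher than random
    # bytes; the exact threshold is up to the caller.
    print(sequence_likelihood(b'\xca\xc7\xd1\xca\xb4\xd5\xa4\xc3\xd1\xba'))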
|
byterom/android_external_chromium_org
|
refs/heads/12.1
|
ppapi/generators/idl_ast.py
|
104
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST."""
from idl_namespace import IDLNamespace
from idl_node import IDLNode
from idl_option import GetOption
from idl_visitor import IDLVisitor
from idl_release import IDLReleaseMap
#
# IDLLabelResolver
#
# A specialized visitor which traverses the AST, building a mapping of
# Release names to Version numbers and calculating a min version.
# The mapping is applied to the File nodes within the AST.
#
class IDLLabelResolver(IDLVisitor):
def Depart(self, node, ignore, childdata):
# Build list of Release=Version
if node.IsA('LabelItem'):
channel = node.GetProperty('channel')
if not channel:
channel = 'stable'
return (node.GetName(), node.GetProperty('VALUE'), channel)
# On completion of the Label, apply to the parent File if the
# name of the label matches the generation label.
if node.IsA('Label') and node.GetName() == GetOption('label'):
try:
node.parent.release_map = IDLReleaseMap(childdata)
except Exception as err:
node.Error('Unable to build release map: %s' % str(err))
# For File objects, set the minimum version
if node.IsA('File'):
file_min, _ = node.release_map.GetReleaseRange()
node.SetMin(file_min)
return None
#
# IDLNamespaceVersionResolver
#
# A specialized visitor which traverses the AST, building a namespace tree
# as it goes. The namespace tree is mapping from a name to a version list.
# Labels must already be resolved before this visitor is used.
#
class IDLNamespaceVersionResolver(IDLVisitor):
NamespaceSet = set(['AST', 'Callspec', 'Interface', 'Member', 'Struct'])
#
# When we arrive at a node we must assign it a namespace and if the
# node is named, then place it in the appropriate namespace.
#
def Arrive(self, node, parent_namespace):
# If we are a File, grab the Min version and release mapping
if node.IsA('File'):
self.rmin = node.GetMinMax()[0]
self.release_map = node.release_map
# Set the min version on any non Label within the File
if not node.IsA('AST', 'File', 'Label', 'LabelItem'):
my_min, _ = node.GetMinMax()
if not my_min:
node.SetMin(self.rmin)
# If this object is not namespace-aware, use the parent's namespace
if node.cls not in self.NamespaceSet:
node.namespace = parent_namespace
else:
# otherwise create one.
node.namespace = IDLNamespace(parent_namespace)
# If this node is named, place it in its parent's namespace
if parent_namespace and node.cls in IDLNode.NamedSet:
# Set version min and max based on properties
if self.release_map:
vmin = node.GetProperty('dev_version')
if vmin == None:
vmin = node.GetProperty('version')
vmax = node.GetProperty('deprecate')
# If no min is available, then use the parent File's min
if vmin == None:
rmin = self.rmin
else:
rmin = self.release_map.GetRelease(vmin)
rmax = self.release_map.GetRelease(vmax)
node.SetReleaseRange(rmin, rmax)
parent_namespace.AddNode(node)
# Pass this namespace to each child in case they inherit it
return node.namespace
#
# IDLFileTypeResolver
#
# A specialized visitor which traverses the AST and sets a FILE property
# on all file nodes. In addition, searches the namespace resolving all
# type references. The namespace tree must already have been populated
# before this visitor is used.
#
class IDLFileTypeResolver(IDLVisitor):
def VisitFilter(self, node, data):
return not node.IsA('Comment', 'Copyright')
def Arrive(self, node, filenode):
# Track the file node to update errors
if node.IsA('File'):
node.SetProperty('FILE', node)
filenode = node
if not node.IsA('AST'):
file_min, _ = filenode.release_map.GetReleaseRange()
if not file_min:
print 'Resetting min on %s to %s' % (node, file_min)
node.SetMinRange(file_min)
# If this node has a TYPEREF, resolve it to a version list
typeref = node.GetPropertyLocal('TYPEREF')
if typeref:
node.typelist = node.parent.namespace.FindList(typeref)
if not node.typelist:
node.Error('Could not resolve %s.' % typeref)
else:
node.typelist = None
return filenode
#
# IDLReleaseResolver
#
# A specialized visitor which will traverse the AST, and generate a mapping
# from any release to the first release in which that version of the object
# was generated. Types must already be resolved before this visitor is used.
#
class IDLReleaseResolver(IDLVisitor):
def Arrive(self, node, releases):
node.BuildReleaseMap(releases)
return releases
#
# IDLAst
#
# A specialized version of the IDLNode for containing the whole of the
# AST. Construction of the AST object will cause resolution of the
# tree including versions, types, etc... Error counts will be collected
# both per file, and on the AST itself.
#
class IDLAst(IDLNode):
def __init__(self, children):
IDLNode.__init__(self, 'AST', 'BuiltIn', 1, 0, children)
self.Resolve()
def Resolve(self):
# Set the appropriate Release=Version mapping for each File
IDLLabelResolver().Visit(self, None)
# Generate the Namespace Tree
self.namespace = IDLNamespace(None)
IDLNamespaceVersionResolver().Visit(self, self.namespace)
# Using the namespace, resolve type references
IDLFileTypeResolver().Visit(self, None)
# Build an ordered list of all releases
releases = set()
for filenode in self.GetListOf('File'):
releases |= set(filenode.release_map.GetReleases())
# Generate a per node list of releases and release mapping
IDLReleaseResolver().Visit(self, sorted(releases))
for filenode in self.GetListOf('File'):
errors = filenode.GetProperty('ERRORS')
if errors:
self.errors += errors
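# --- Illustrative sketch (not part of the original Chromium source) ---
# Every resolver above relies on the same traversal contract: Visit() calls
# Arrive(node, data) on the way down, recurses with whatever Arrive returned,
# then calls Depart(node, data, childdata) on the way up.  The toy classes
# below only demonstrate that flow; the names and signatures are stand-ins,
# not the real idl_node/idl_visitor implementations.
if __name__ == '__main__':
    class ToyNode(object):
        def __init__(self, name, children=None):
            self.name = name
            self.children = children or []

    class ToyVisitor(object):
        def Visit(self, node, data):
            data = self.Arrive(node, data)
            childdata = [self.Visit(child, data) for child in node.children]
            return self.Depart(node, data, childdata)

        def Arrive(self, node, data):
            return data          # default: pass data through to the children

        def Depart(self, node, data, childdata):
            return childdata     # default: bubble the child results back up

    class NamePrinter(ToyVisitor):
        def Arrive(self, node, depth):
            print '  ' * depth + node.name
            return depth + 1

    tree = ToyNode('AST', [ToyNode('File', [ToyNode('Interface')])])
    NamePrinter().Visit(tree, 0)   # prints AST / File / Interface, indented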
|
hejunbok/apm_planner
|
refs/heads/master
|
libs/mavlink/share/pyshared/pymavlink/examples/bwtest.py
|
34
|
#!/usr/bin/env python
'''
check bandwidth of link
'''
import sys, struct, time, os
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import mavutil
from optparse import OptionParser
parser = OptionParser("bwtest.py [options]")
parser.add_option("--baudrate", dest="baudrate", type='int',
help="master port baud rate", default=115200)
parser.add_option("--device", dest="device", default=None, help="serial device")
(opts, args) = parser.parse_args()
if opts.device is None:
print("You must specify a serial device")
sys.exit(1)
# create a mavlink serial instance
master = mavutil.mavlink_connection(opts.device, baud=opts.baudrate)
t1 = time.time()
counts = {}
bytes_sent = 0
bytes_recv = 0
while True:
master.mav.heartbeat_send(1, 1)
master.mav.sys_status_send(1, 2, 3, 4, 5, 6, 7)
master.mav.gps_raw_send(1, 2, 3, 4, 5, 6, 7, 8, 9)
master.mav.attitude_send(1, 2, 3, 4, 5, 6, 7)
master.mav.vfr_hud_send(1, 2, 3, 4, 5, 6)
while master.port.inWaiting() > 0:
m = master.recv_msg()
if m == None: break
if m.get_type() not in counts:
counts[m.get_type()] = 0
counts[m.get_type()] += 1
t2 = time.time()
if t2 - t1 > 1.0:
print("%u sent, %u received, %u errors bwin=%.1f kB/s bwout=%.1f kB/s" % (
master.mav.total_packets_sent,
master.mav.total_packets_received,
master.mav.total_receive_errors,
0.001*(master.mav.total_bytes_received-bytes_recv)/(t2-t1),
0.001*(master.mav.total_bytes_sent-bytes_sent)/(t2-t1)))
bytes_sent = master.mav.total_bytes_sent
bytes_recv = master.mav.total_bytes_received
t1 = t2
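# Worked example of the rate computation above (illustrative numbers only):
# if total_bytes_received grew by 57600 bytes while t2 - t1 was 1.0 s, then
# 0.001 * 57600 / 1.0 = 57.6, so bwin is reported as 57.6 kB/s.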
|
Hexadorsimal/pynes
|
refs/heads/master
|
nes/processors/cpu/instructions/flags/cli.py
|
1
|
from .clear import ClearInstruction
class Cli(ClearInstruction):
flag_name = 'i'
|
gokudomatic/cobiv
|
refs/heads/master
|
cobiv/modules/core/session/session.py
|
1
|
from collections import deque
from datetime import datetime
import copy
from cobiv.libs.templite import Templite
from cobiv.modules.core.entity import Entity
from cobiv.modules.core.session.cursor import Cursor
class CoreVariables:
def __init__(self, session):
self.session = session
session.fields['file_size'] = self.get_file_size
session.fields['image_size'] = self.get_image_size
session.fields['file_format'] = self.get_image_format
session.fields['file_date'] = self.get_file_date
session.fields['filename'] = lambda: self.session.cursor.filename
session.fields['currentset_position'] = lambda: (
(self.session.cursor.pos + 1) if self.session.cursor.pos is not None else "0"
) if not self.session.cursor.is_eol() else "EOL"
session.fields['currentset_count'] = lambda: len(
self.session.cursor) if self.session.cursor.pos is not None else "0"
def get_simple_field(self, category, field_name, formatter=None):
if self.session.cursor.file_id is None:
return "N/A"
if not field_name in self.session.cursor.get_tags()[category]:
return "N/A"
values = self.session.cursor.get_tags()[category][field_name]
value = values[0] if len(values) > 0 else None
if formatter is None:
return value
else:
return formatter(value)
def get_file_size(self):
return self.get_simple_field(0, 'size')
def get_file_date(self):
mod_date = self.get_simple_field(0, 'file_date')
if mod_date != "N/A":
mod_date = datetime.fromtimestamp(float(mod_date)).strftime('%Y-%m-%d %H:%M:%S')
return mod_date
def get_image_size(self):
if self.session.cursor.file_id is not None:
tags = self.session.cursor.get_tags()
width, height = None, None
if tags[0]['file_type'][0] == 'file':
width = tags[0]['width'][0]
height = tags[0]['height'][0]
if width is not None and height is not None:
return str(width) + " x " + str(height)
return "N/A"
def get_image_format(self):
return self.get_simple_field(0, 'format')
@staticmethod
def sizeof_fmt(num, suffix='B'):
num = int(num)
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Y', suffix)
class HistoryContext:
def __init__(self):
self.fn = None
self.args = {}
self.category = None
def clone(self):
clone = HistoryContext()
clone.fn = self.fn
clone.args = copy.deepcopy(self.args)
clone.category = self.category
return clone
class Session(Entity):
cursor = None
fields = {}
active_fs = {}
cmd_actions = {}
cmd_hotkeys = {}
mimetype_actions = {}
view_context = {}
view_category = None
view_category_history = deque()
view_history = deque()
max_view_history_size = 20
skip_push_context = False
def __init__(self):
self.cursor = Cursor()
CoreVariables(self)
def set_cursor(self, new_cursor):
self.cursor.unbind(file_id=self.on_file_id_change)
self.cursor = new_cursor
self.cursor.bind(file_id=self.on_file_id_change)
def on_file_id_change(self, instance, value):
pass
def fill_text_fields(self, original_text):
return Templite(original_text.replace("%{", "${write(").replace("}%", ")}$")).render(**self.fields)
def get_filesystem(self, key):
return self.active_fs[key]
def add_filesystem(self, key, filesystem):
self.active_fs[key] = filesystem
def set_action(self, name, fn, profile="default"):
if name in self.cmd_actions:
self.cmd_actions[name][profile] = fn
else:
self.cmd_actions[name] = {profile: fn}
def set_hotkey(self, key, command, modifier=0, profile="default"):
if key in self.cmd_hotkeys:
hotkey = self.cmd_hotkeys[key]
if profile in hotkey:
hotkey[profile][modifier] = command
else:
hotkey[profile] = {modifier: command}
else:
self.cmd_hotkeys[key] = {profile: {modifier: command}}
def get_hotkey_command(self, key, modifier=0, profile="default"):
hotkeys_profiles = self.cmd_hotkeys[key]
if profile in hotkeys_profiles:
hotkeys = hotkeys_profiles[profile]
if modifier in hotkeys:
return hotkeys[modifier]
return False
def register_mimetype_action(self, mimetype, action, fn):
self.mimetype_actions.setdefault(mimetype, {})[action] = fn
def get_mimetype_action(self, mimetype, action, default=None):
if mimetype in self.mimetype_actions:
if action in self.mimetype_actions[mimetype]:
return self.mimetype_actions[mimetype][action]
else:
return default
def get_context(self, category):
return self.view_context.setdefault(category, HistoryContext())
def push_context(self, category):
if not self.skip_push_context:
self.view_category_history.append(category)
self.view_context[category].category = category
self.view_history.append(self.view_context[category].clone())
def pop_context(self):
if len(self.view_history) > 0:
view_category = self.view_category_history.pop()
self.view_context[view_category] = self.view_history.pop()
return self.view_context[view_category]
else:
return None
|
veger/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_admpwd.py
|
27
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_admpwd
short_description: change admin password of PAN-OS device using SSH with SSH key
description:
- Change the admin password of PAN-OS via SSH using a SSH key for authentication.
- Useful for AWS instances where the first login should be done via SSH.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
username:
description:
- username for initial authentication
required: false
default: "admin"
key_filename:
description:
- filename of the SSH Key to use for authentication
required: true
newpassword:
description:
- password to configure for admin on the PAN-OS device
required: true
'''
EXAMPLES = '''
# Tries for 10 times to set the admin password of 192.168.1.1 to "badpassword"
# via SSH, authenticating using key /tmp/ssh.key
- name: set admin password
panos_admpwd:
ip_address: "192.168.1.1"
username: "admin"
key_filename: "/tmp/ssh.key"
newpassword: "badpassword"
register: result
until: result is not failed
retries: 10
delay: 30
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "Last login: Fri Sep 16 11:09:20 2016 from 10.35.34.56.....Configuration committed successfully"
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
import time
import sys
try:
import paramiko
HAS_LIB = True
except ImportError:
HAS_LIB = False
_PROMPTBUFF = 4096
def wait_with_timeout(module, shell, prompt, timeout=60):
now = time.time()
result = ""
while True:
if shell.recv_ready():
result += shell.recv(_PROMPTBUFF)
endresult = result.strip()
if len(endresult) != 0 and endresult[-1] == prompt:
break
if time.time() - now > timeout:
module.fail_json(msg="Timeout waiting for prompt")
return result
def set_panwfw_password(module, ip_address, key_filename, newpassword, username):
stdout = ""
ssh = paramiko.SSHClient()
# add policy to accept all host keys, I haven't found
# a way to retrieve the instance SSH key fingerprint from AWS
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip_address, username=username, key_filename=key_filename)
shell = ssh.invoke_shell()
# wait for the shell to start
buff = wait_with_timeout(module, shell, ">")
stdout += buff
# step into config mode
shell.send('configure\n')
# wait for the config prompt
buff = wait_with_timeout(module, shell, "#")
stdout += buff
if module.check_mode:
# exit and close connection
shell.send('exit\n')
ssh.close()
return False, 'Connection test successful. Password left intact.'
# set admin password
shell.send('set mgt-config users ' + username + ' password\n')
# wait for the password prompt
buff = wait_with_timeout(module, shell, ":")
stdout += buff
# enter password for the first time
shell.send(newpassword + '\n')
# wait for the password prompt
buff = wait_with_timeout(module, shell, ":")
stdout += buff
# enter password for the second time
shell.send(newpassword + '\n')
# wait for the config mode prompt
buff = wait_with_timeout(module, shell, "#")
stdout += buff
# commit !
shell.send('commit\n')
# wait for the prompt
buff = wait_with_timeout(module, shell, "#", 120)
stdout += buff
if 'success' not in buff:
module.fail_json(msg="Error setting " + username + " password: " + stdout)
# exit
shell.send('exit\n')
ssh.close()
return True, stdout
def main():
argument_spec = dict(
ip_address=dict(required=True),
username=dict(default='admin'),
key_filename=dict(required=True),
newpassword=dict(no_log=True, required=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_LIB:
module.fail_json(msg='paramiko is required for this module')
ip_address = module.params["ip_address"]
if not ip_address:
module.fail_json(msg="ip_address should be specified")
key_filename = module.params["key_filename"]
if not key_filename:
module.fail_json(msg="key_filename should be specified")
newpassword = module.params["newpassword"]
if not newpassword:
module.fail_json(msg="newpassword is required")
username = module.params['username']
try:
changed, stdout = set_panwfw_password(module, ip_address, key_filename, newpassword, username)
module.exit_json(changed=changed, stdout=stdout)
except Exception:
x = sys.exc_info()[1]
module.fail_json(msg=x)
if __name__ == '__main__':
main()
|
laszlocsomor/tensorflow
|
refs/heads/master
|
tensorflow/python/training/coordinator_test.py
|
18
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
def StopOnEvent(coord, wait_for_stop, set_when_stopped):
wait_for_stop.wait()
coord.request_stop()
set_when_stopped.set()
def RaiseOnEvent(coord, wait_for_stop, set_when_stopped, ex, report_exception):
try:
wait_for_stop.wait()
raise ex
except RuntimeError as e:
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
finally:
if set_when_stopped:
set_when_stopped.set()
def RaiseOnEventUsingContextHandler(coord, wait_for_stop, set_when_stopped, ex):
with coord.stop_on_exception():
wait_for_stop.wait()
raise ex
if set_when_stopped:
set_when_stopped.set()
def SleepABit(n_secs, coord=None):
if coord:
coord.register_thread(threading.current_thread())
time.sleep(n_secs)
def WaitForThreadsToRegister(coord, num_threads):
while True:
with coord._lock:
if len(coord._registered_threads) == num_threads:
break
time.sleep(0.001)
class CoordinatorTest(test.TestCase):
def testStopAPI(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = coordinator.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
t = threading.Thread(target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev))
t.start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
wait_for_stop_ev.set()
has_stopped_ev.wait()
self.assertTrue(coord.wait_for_stop(0.05))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
for t in threads:
self.assertFalse(t.is_alive())
def testJoinAllRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02, coord)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 3)
coord.join()
for t in threads:
self.assertFalse(t.is_alive())
def testJoinSomeRegistered(self):
coord = coordinator.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
WaitForThreadsToRegister(coord, 2)
    # threads[1] is not registered, so we must pass it in explicitly.
    coord.join(threads[1:2])
for t in threads:
self.assertFalse(t.is_alive())
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = coordinator.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinWithoutGraceExpires(self):
coord = coordinator.Coordinator()
wait_for_stop_ev = threading.Event()
has_stopped_ev = threading.Event()
threads = [
threading.Thread(target=StopOnEvent,
args=(coord, wait_for_stop_ev, has_stopped_ev)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
wait_for_stop_ev.set()
has_stopped_ev.wait()
coord.join(
threads, stop_grace_period_secs=1., ignore_live_threads=True)
def testJoinRaiseReportExcInfo(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, ev_2, RuntimeError("First"), False)),
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_2, None, RuntimeError("Too late"), False))]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, ev_2, RuntimeError("First"), True)),
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_2, None, RuntimeError("Too late"), True))]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinIgnoresOutOfRange(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None,
errors_impl.OutOfRangeError(None, None, "First"),
True))
]
for t in threads:
t.start()
ev_1.set()
coord.join(threads)
def testJoinIgnoresMyExceptionType(self):
coord = coordinator.Coordinator(clean_stop_exception_types=(ValueError,))
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, ValueError("Clean stop"), True))
]
for t in threads:
t.start()
ev_1.set()
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
ev_2 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEventUsingContextHandler,
args=(coord, ev_1, ev_2, RuntimeError("First"))),
threading.Thread(
target=RaiseOnEventUsingContextHandler,
args=(coord, ev_2, None, RuntimeError("Too late")))]
for t in threads:
t.start()
ev_1.set()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testClearStopClearsExceptionToo(self):
coord = coordinator.Coordinator()
ev_1 = threading.Event()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, RuntimeError("First"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
ev_1.set()
coord.join(threads)
coord.clear_stop()
threads = [
threading.Thread(
target=RaiseOnEvent,
args=(coord, ev_1, None, RuntimeError("Second"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "Second"):
ev_1.set()
coord.join(threads)
def testRequestStopRaisesIfJoined(self):
coord = coordinator.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError as e:
reported = True
coord.request_stop(e)
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError as e:
coord.request_stop(e)
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def testRequestStopRaisesIfJoined_ExcInfo(self):
# Same as testRequestStopRaisesIfJoined but using sys.exc_info().
coord = coordinator.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError:
reported = True
coord.request_stop(sys.exc_info())
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError:
coord.request_stop(sys.exc_info())
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(test.TestCase):
def testTargetArgs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord, n))
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetKwargs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(coord, 0, target=_StopAt0,
kwargs={"coord": coord, "n": n})
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetMixedArgs(self):
n = [3]
coord = coordinator.Coordinator()
thread = coordinator.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord,), kwargs={"n": n})
coord.join([thread])
self.assertEqual(0, n[0])
if __name__ == "__main__":
test.main()
|
jjas0nn/solvem
|
refs/heads/master
|
tensorflow/lib/python2.7/site-packages/tensorflow/python/framework/__init__.py
|
12133432
| |
pdellaert/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovh/ovh_ip_failover.py
|
42
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovh_ip_failover
short_description: Manage OVH IP failover address
description:
- Manage OVH (French European hosting provider) IP Failover Address. For now, this module can only be used to move
an ip failover (or failover block) between services
version_added: "2.8"
author: "Pascal HERAUD (@pascalheraud)"
notes:
- Uses the python OVH Api U(https://github.com/ovh/python-ovh).
You have to create an application (a key and secret) with a consumer
key as described into U(https://eu.api.ovh.com/g934.first_step_with_api)
requirements:
- ovh >= 0.4.8
options:
name:
required: true
description:
- The IP address to manage (can be a single IP like 1.1.1.1
or a block like 1.1.1.1/28 )
service:
required: true
description:
- The name of the OVH service this IP address should be routed to
endpoint:
required: true
description:
- The endpoint to use (for instance ovh-eu)
wait_completion:
required: false
default: true
type: bool
description:
- If true, the module will wait for the IP address to be moved.
If false, exit without waiting. The taskId will be returned
in module output
wait_task_completion:
required: false
default: 0
description:
- If not 0, the module will wait for this task id to be
completed. Use wait_task_completion if you want to wait for
completion of a previously executed task with
wait_completion=false. You can execute this module repeatedly on
a list of failover IPs using wait_completion=false (see examples)
application_key:
required: true
description:
- The applicationKey to use
application_secret:
required: true
description:
- The application secret to use
consumer_key:
required: true
description:
- The consumer key to use
timeout:
required: false
default: 120
description:
- The timeout in seconds used to wait for a task to be
completed. Default is 120 seconds.
'''
EXAMPLES = '''
# Route an IP address 1.1.1.1 to the service ns666.ovh.net
- ovh_ip_failover:
name: 1.1.1.1
service: ns666.ovh.net
endpoint: ovh-eu
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
- ovh_ip_failover:
name: 1.1.1.1
service: ns666.ovh.net
endpoint: ovh-eu
wait_completion: false
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
register: moved
- ovh_ip_failover:
name: 1.1.1.1
service: ns666.ovh.net
endpoint: ovh-eu
wait_task_completion: "{{moved.taskId}}"
application_key: yourkey
application_secret: yoursecret
consumer_key: yourconsumerkey
'''
RETURN = '''
'''
import time
try:
import ovh
import ovh.exceptions
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import quote_plus
def getOvhClient(ansibleModule):
endpoint = ansibleModule.params.get('endpoint')
application_key = ansibleModule.params.get('application_key')
application_secret = ansibleModule.params.get('application_secret')
consumer_key = ansibleModule.params.get('consumer_key')
return ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key
)
def waitForNoTask(client, name, timeout):
currentTimeout = timeout
while client.get('/ip/{0}/task'.format(quote_plus(name)),
function='genericMoveFloatingIp',
status='todo'):
time.sleep(1) # Delay for 1 sec
currentTimeout -= 1
if currentTimeout < 0:
return False
return True
def waitForTaskDone(client, name, taskId, timeout):
currentTimeout = timeout
while True:
task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
if task['status'] == 'done':
return True
time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
currentTimeout -= 5
if currentTimeout < 0:
return False
return True
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
service=dict(required=True),
endpoint=dict(required=True),
wait_completion=dict(default=True, type='bool'),
wait_task_completion=dict(default=0, type='int'),
application_key=dict(required=True, no_log=True),
application_secret=dict(required=True, no_log=True),
consumer_key=dict(required=True, no_log=True),
timeout=dict(default=120, type='int')
),
supports_check_mode=True
)
result = dict(
changed=False
)
if not HAS_OVH:
module.fail_json(msg='ovh-api python module is required to run this module ')
# Get parameters
name = module.params.get('name')
service = module.params.get('service')
timeout = module.params.get('timeout')
wait_completion = module.params.get('wait_completion')
wait_task_completion = module.params.get('wait_task_completion')
# Connect to OVH API
client = getOvhClient(module)
# Check that the failover IP exists
try:
ips = client.get('/ip', ip=name, type='failover')
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of ips, '
'check application key, secret, consumerkey and parameters. '
'Error returned by OVH api was : {0}'.format(apiError))
if name not in ips and '{0}/32'.format(name) not in ips:
module.fail_json(msg='IP {0} does not exist'.format(name))
# Check that no task is pending before going on
try:
if not waitForNoTask(client, name, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for no pending '
'tasks before executing the module '.format(timeout))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the list of pending tasks '
'of the ip, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
try:
ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
except APIError as apiError:
module.fail_json(
msg='Unable to call OVH api for getting the properties '
'of the ip, check application key, secret, consumerkey '
'and parameters. Error returned by OVH api was : {0}'
.format(apiError))
if ipproperties['routedTo']['serviceName'] != service:
if not module.check_mode:
if wait_task_completion == 0:
# Move the IP and get the created taskId
task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
taskId = task['taskId']
result['moved'] = True
else:
# Just wait for the given taskId to be completed
taskId = wait_task_completion
result['moved'] = False
result['taskId'] = taskId
if wait_completion or wait_task_completion != 0:
if not waitForTaskDone(client, name, taskId, timeout):
module.fail_json(
msg='Timeout of {0} seconds while waiting for completion '
'of move ip to service'.format(timeout))
result['waited'] = True
else:
result['waited'] = False
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
philcleveland/grpc
|
refs/heads/master
|
examples/python/route_guide/route_guide_resources.py
|
115
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common resources used in the gRPC route guide example."""
import json
import route_guide_pb2
def read_route_guide_database():
"""Reads the route guide database.
Returns:
The full contents of the route guide database as a sequence of
route_guide_pb2.Features.
"""
feature_list = []
with open("route_guide_db.json") as route_guide_db_file:
for item in json.load(route_guide_db_file):
feature = route_guide_pb2.Feature(
name=item["name"],
location=route_guide_pb2.Point(
latitude=item["location"]["latitude"],
longitude=item["location"]["longitude"]))
feature_list.append(feature)
return feature_list
|
MichaelDrogalis/ansible
|
refs/heads/devel
|
test/units/parsing/test_unquote.py
|
152
|
# coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import unquote
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestUnquote:
UNQUOTE_DATA = (
(u'1', u'1'),
(u'\'1\'', u'1'),
(u'"1"', u'1'),
(u'"1 \'2\'"', u'1 \'2\''),
(u'\'1 "2"\'', u'1 "2"'),
(u'\'1 \'2\'\'', u'1 \'2\''),
(u'"1\\"', u'"1\\"'),
(u'\'1\\\'', u'\'1\\\''),
(u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'),
(u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'),
(u'"', u'"'),
(u'\'', u'\''),
# Not entirely sure these are good but they match the current
# behaviour
(u'"1""2"', u'1""2'),
(u'\'1\'\'2\'', u'1\'\'2'),
(u'"1" 2 "3"', u'1" 2 "3'),
(u'"1"\'2\'"3"', u'1"\'2\'"3'),
)
def check_unquote(self, quoted, expected):
tools.eq_(unquote(quoted), expected)
def test_unquote(self):
for datapoint in self.UNQUOTE_DATA:
yield self.check_unquote, datapoint[0], datapoint[1]
|
PRJosh/android_kernel_samsung_mondrianwifi
|
refs/heads/cm-11.0
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
Jovy23/M919_Kernel
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
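# Worked example for the "M" (mutex state) branch above, with illustrative
# numbers only: in a status value such as 204, each decimal digit encodes the
# state of one lock, counted from the right. With arg = "0", 204 / 10**0 % 10
# gives 4 ("locked"); with arg = "2", 204 / 10**2 % 10 gives 2 ("blocked").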
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
PapenfussLab/MHC-clogs
|
refs/heads/master
|
lib/mungolite/mungoCore.py
|
2
|
"""
mungoCore - abstract base classes and core utilites
"""
import sys
import copy
from useful import smartopen, raiseDeprecated
def attributesToFormat(attributes):
return '\t'.join(['%%(%s)s' % attr for attr in attributes])
class AbstractFeature(object):
"""Feature base class"""
def __init__(self, *args, **kw):
self._rawData = None
self.indefault = kw.pop('indefault', None)
self.outdefault = kw.pop('outdefault', '.')
doConvert = kw.pop('doConvert', True)
if not args and not kw:
self._fromDict({})
doConvert = False
elif kw:
self._fromDict(kw)
elif type(args[0])==dict:
self._fromDict(args[0])
elif type(args[0]) in [list, tuple]:
self._fromList(args[0])
        elif len(args)==len(self.attributes):
            self._fromList(args)
else:
try:
self._fromDict(args[0].__dict__)
except:
                print self.attributes
print args
print kw
raise Exception, "I don't know what else to do."
if doConvert:
self.convert()
def convert(self):
if type(self.converters)==dict:
self.converters = self.converters.items()
for attrib,converter in self.converters:
try:
self.__dict__[attrib] = converter(self.__dict__[attrib])
except TypeError:
pass
except ValueError:
print self.__dict__
sys.exit(-1)
def __repr__(self):
return self.format % self.__dict__
def _fromList(self, tokens):
self._rawData = tokens
for key,value in zip(self.attributes, tokens):
if value!=self.outdefault:
self.__dict__[key] = value
else:
self.__dict__[key] = self.indefault
def _fromDict(self, d):
self._rawData = d
for attrib in self.attributes:
self.__dict__[attrib] = d.get(attrib, self.indefault)
def pretty(self, w=12):
format = '%%-%is\t%%s' % w
output = []
for attrib in self.attributes:
output.append(format % (attrib, str(self.__dict__[attrib])))
return '\n'.join(output)
def prettyprint(self):
print self.pretty()
def addAttribute(self, field, value=None, format=None):
self.attributes.append(field)
if not value:
self.__dict__[field] = self.indefault
else:
self.__dict__[field] = value
if not format:
self.format = self.format + '\t%%(%s)s' % field
else:
self.format = self.format + format % field
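# A minimal sketch of a concrete feature class (hypothetical, not part of
# mungolite): subclasses are expected to provide the 'attributes', 'converters'
# and 'format' class attributes that AbstractFeature.__init__ relies on.
#
#   class Interval(AbstractFeature):
#       attributes = ['chrom', 'start', 'end']
#       converters = [('start', int), ('end', int)]
#       format = attributesToFormat(attributes)
#
#   iv = Interval(['chr1', '100', '200'])   # or Interval(chrom='chr1', ...)
#   print iv                                # tab-delimited, via 'format'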
class AbstractDataFile(object):
def flush(self):
self.iFile.flush()
def close(self):
self.iFile.close()
class AbstractDataReader(AbstractDataFile):
def __init__(self, iFileHandle):
self.iFile = smartopen(iFileHandle)
self.iFilename = self.iFile.name
self._iter = None
def _generator(self):
pass
def __iter__(self):
self._iter = self._generator()
return self
def next(self):
for x in self._iter:
return x
raise StopIteration
def readAll(self):
"""Read all entries.
@returns: A list
"""
results = []
for result in self:
results.append(result)
return results
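# Hypothetical reader built on the classes above (illustrative sketch only):
# concrete readers implement _generator() to yield parsed records.
#
#   class LineReader(AbstractDataReader):
#       def _generator(self):
#           for line in self.iFile:
#               yield line.rstrip('\n')
#
#   for record in LineReader('data.txt'):
#       print record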
|
luistorresm/odoo
|
refs/heads/8.0
|
addons/auth_openid/controllers/main.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
import openerp.http as http
from openerp.http import request
from .. import utils
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
"""Return system user info string, such as USERNAME-EUID"""
try:
info = getpass.getuser()
except ImportError:
if os.name == 'nt':
            # when there is no 'USERNAME' in the environment, getpass.getuser()
            # fails when trying to import the 'pwd' module, which is unix only.
            # In that case we have to fall back to the real win32 API.
import win32api
info = win32api.GetUserName()
else:
raise
    euid = getattr(os, 'geteuid', None) # Not available on some platforms
if euid is not None:
info = '%s-%d' % (info, euid())
return info
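# Example return value (illustrative only): "alice-1000" on a unix host where
# getpass.getuser() returns "alice" and os.geteuid() returns 1000; just the
# username on platforms without os.geteuid.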
_storedir = os.path.join(tempfile.gettempdir(),
'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
def complete(self, message, endpoint, return_to):
if message.getOpenIDNamespace() == consumer.OPENID2_NS:
server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
if server_url.startswith('https://www.google.com/a/'):
assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
assoc = self.store.getAssociation(server_url, assoc_handle)
if assoc:
# update fields
for attr in ['claimed_id', 'identity']:
value = message.getArg(consumer.OPENID2_NS, attr, '')
value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % werkzeug.url_quote_plus(value)
message.setArg(consumer.OPENID2_NS, attr, value)
# now, resign the message
message.delArg(consumer.OPENID2_NS, 'sig')
message.delArg(consumer.OPENID2_NS, 'signed')
message = assoc.signMessage(message)
return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
class OpenIDController(http.Controller):
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, oidrequest):
"""Add extensions to the oidrequest"""
sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
optional=self._OPTIONAL_ATTRIBUTES)
oidrequest.addExtension(sreg_request)
ax_request = ax.FetchRequest()
for alias in self._REQUIRED_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
for alias in self._OPTIONAL_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
oidrequest.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
attrs = {}
all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
if sreg_resp:
for attr in all_attrs:
value = sreg_resp.get(attr)
if value is not None:
attrs[attr] = value
ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
if ax_resp:
for attr in all_attrs:
value = ax_resp.getSingle(utils.SREG2AX[attr])
if value is not None:
attrs[attr] = value
return attrs
def _get_realm(self):
return request.httprequest.host_url
@http.route('/auth_openid/login/verify_direct', type='http', auth='none')
def verify_direct(self, db, url):
result = self._verify(db, url)
if 'error' in result:
return werkzeug.exceptions.BadRequest(result['error'])
if result['action'] == 'redirect':
return werkzeug.utils.redirect(result['value'])
return result['value']
@http.route('/auth_openid/login/verify', type='json', auth='none')
def verify(self, db, url):
return self._verify(db, url)
def _verify(self, db, url):
redirect_to = werkzeug.urls.Href(request.httprequest.host_url + 'auth_openid/login/process')(session_id=request.session_id)
realm = self._get_realm()
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
oidrequest = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if oidrequest is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
request.session.openid_session = session
self._add_extensions(oidrequest)
if oidrequest.shouldSendRedirect():
redirect_url = oidrequest.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': request.session_id}
else:
form_html = oidrequest.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': request.session_id}
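    # Illustrative shape of the value returned by _verify() for a
    # redirect-capable provider (all values below are made up):
    #   {'action': 'redirect',
    #    'value': 'https://openid.example.com/server?openid.mode=checkid_setup&...',
    #    'session_id': 'a1b2c3d4'}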
@http.route('/auth_openid/login/process', type='http', auth='none')
def process(self, **kw):
session = getattr(request.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect('/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = request.httprequest.args
info = oidconsumer.complete(query, request.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(dbname, login, key)
session['message'] = 'This OpenID identifier is not associated to any active users'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect('/#action=login&loginerror=1')
@http.route('/auth_openid/login/status', type='json', auth='none')
def status(self):
session = getattr(request.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
waseem18/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/words/protocols/jabber/jstrports.py
|
68
|
# -*- test-case-name: twisted.words.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
""" A temporary placeholder for client-capable strports, until we
sufficient use cases get identified """
from twisted.internet.endpoints import _parse
def _parseTCPSSL(factory, domain, port):
""" For the moment, parse TCP or SSL connections the same """
return (domain, int(port), factory), {}
def _parseUNIX(factory, address):
return (address, factory), {}
_funcs = { "tcp" : _parseTCPSSL,
"unix" : _parseUNIX,
"ssl" : _parseTCPSSL }
def parse(description, factory):
args, kw = _parse(description)
return (args[0].upper(),) + _funcs[args[0]](factory, *args[1:], **kw)
def client(description, factory):
from twisted.application import internet
name, args, kw = parse(description, factory)
return getattr(internet, name + 'Client')(*args, **kw)
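# Hypothetical usage (illustrative only, not part of this module):
#   from twisted.words.protocols.jabber import jstrports
#   svc = jstrports.client("tcp:xmpp.example.org:5222", factory)
#   svc.setServiceParent(application)
# For a "tcp:host:port" description this resolves to
# twisted.application.internet.TCPClient(host, port, factory).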
|
TimoRoth/oggm
|
refs/heads/master
|
benchmarks/check_bench.py
|
5
|
"""Utility script to check that the benchmark run.
#FIXME: this should somehow be added to CI
"""
if __name__ == '__main__':
import hef_dynamics
import massbalance
import numerics
import track_model_results
for func in dir(hef_dynamics):
if 'time_' not in func:
continue
print('% -- run -- hef_dynamics.{}'.format(func))
func = getattr(hef_dynamics, func)
hef_dynamics.setup()
func()
hef_dynamics.teardown()
for func in dir(massbalance):
if 'time_' not in func:
continue
print('% -- run -- massbalance.{}'.format(func))
func = getattr(massbalance, func)
massbalance.setup()
func()
massbalance.teardown()
for func in dir(numerics):
if 'time_' not in func:
continue
print('% -- run -- numerics.{}'.format(func))
func = getattr(numerics, func)
func()
c = track_model_results.hef_prepro()
gdir = c.setup_cache()
for func in dir(c):
if 'track_' not in func:
continue
func = getattr(c, func)
print('% -- run -- hef_prepro.{} -- out: {}'.format(func.__name__,
func(gdir)))
c = track_model_results.full_workflow()
gdir = c.setup_cache()
for func in dir(c):
if 'track_' not in func:
continue
func = getattr(c, func)
print('% -- run -- full_workflow.{} -- out: {}'.format(func.__name__,
func(gdir)))
c = track_model_results.columbia_calving()
gdir = c.setup_cache()
for func in dir(c):
if 'track_' not in func:
continue
func = getattr(c, func)
print('% -- run -- columbia_calving.{} -- out: '
'{}'.format(func.__name__, func(gdir)))
|
soltanmm/grpc
|
refs/heads/master
|
src/python/grpcio/grpc/framework/common/style.py
|
63
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines an enum for classifying RPC methods by control flow semantics."""
import enum
@enum.unique
class Service(enum.Enum):
"""Describes the control flow style of RPC method implementation."""
INLINE = 'inline'
EVENT = 'event'
|
iluminite/argh-examples
|
refs/heads/master
|
z/scripts/__init__.py
|
12133432
| |
gisce/OCB
|
refs/heads/7.0
|
addons/account_anglo_saxon/purchase.py
|
427
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order(osv.osv):
_name = "purchase.order"
_inherit = "purchase.order"
_description = "Purchase Order"
def _choose_account_from_po_line(self, cr, uid, order_line, context=None):
account_id = super(purchase_order, self)._choose_account_from_po_line(cr, uid, order_line, context=context)
if order_line.product_id and not order_line.product_id.type == 'service':
acc_id = order_line.product_id.property_stock_account_input and order_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = order_line.product_id.categ_id.property_stock_account_input_categ and order_line.product_id.categ_id.property_stock_account_input_categ.id
if acc_id:
fpos = order_line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
WikipediaLibrary/TWLight
|
refs/heads/master
|
TWLight/users/management/commands/user_renewal_notice.py
|
1
|
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from django.urls import reverse
from TWLight.users.signals import Notice
from TWLight.users.models import Authorization, get_company_name
class Command(BaseCommand):
help = "Sends advance notice to users with expiring authorizations, prompting them to apply for renewal."
def handle(self, *args, **options):
        # Get all authorization objects with an expiry date in the next
        # two weeks, for which we haven't yet sent a reminder email, and
        # exclude users who have disabled these emails.
expiring_authorizations = Authorization.objects.filter(
date_expires__lt=datetime.today() + timedelta(weeks=2),
date_expires__gte=datetime.today(),
reminder_email_sent=False,
partners__isnull=False,
).exclude(user__userprofile__send_renewal_notices=False)
for authorization_object in expiring_authorizations:
Notice.user_renewal_notice.send(
sender=self.__class__,
user_wp_username=authorization_object.user.editor.wp_username,
user_email=authorization_object.user.email,
user_lang=authorization_object.user.userprofile.lang,
partner_name=get_company_name(authorization_object),
partner_link=reverse("users:my_library"),
)
# Record that we sent the email so that we only send one.
authorization_object.reminder_email_sent = True
authorization_object.save()
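# This command would typically be run from a scheduled job, e.g. (hypothetical
# invocation, not part of this file):
#   python manage.py user_renewal_notice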
|
aspose-words/Aspose.Words-for-Java
|
refs/heads/master
|
Plugins/Aspose_Words_Java_for_Jython/setup.py
|
4
|
__author__ = 'masood.anwer'
from setuptools import setup, find_packages
setup(
name = 'asposewordsjavajython',
packages = find_packages(),
version = '1.0.0',
description = 'Aspose.Words Java for Jython allows you to use Aspose.Words for Java API in your Jython applications',
author='Masood Anwer',
author_email='masood.anwer@aspose.com',
url='https://github.com/asposewords/Aspose_Words_Java/tree/master/Plugins/Aspose_Words_Java_for_Jython',
classifiers=[
'Programming Language :: Jython',
'Programming Language :: Jython :: 2',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
]
)
|
Krylon360/vimeo-graphite-web
|
refs/heads/master
|
webapp/graphite/render/datalib.py
|
9
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import time
from graphite.logger import log
from graphite.storage import STORE
from graphite.readers import FetchInProgress
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator( list.__iter__(self) )
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf: buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf: buf.remove(None)
if buf: yield self.__consolidate(buf)
else: yield None
raise StopIteration
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable: return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception, "Invalid consolidation function!"
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (self.name, self.start, self.end, self.step)
def getInfo(self):
"""Pickle-friendly representation of the series"""
return {
'name' : self.name,
'start' : self.start,
'end' : self.end,
'step' : self.step,
'values' : list(self),
}
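# Worked example of consolidation (illustrative values only):
#   ts = TimeSeries('metric', 0, 60, 10, [1, 2, 3, 4, None, 6])
#   ts.consolidate(2)
#   list(ts)  ->  [1.5, 3.5, 6.0]
# Each pair of raw points is averaged (the default consolidationFunc), with
# None values dropped before averaging.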
# Data retrieval API
def fetchData(requestContext, pathExpr):
seriesList = []
startTime = int( time.mktime( requestContext['startTime'].timetuple() ) )
endTime = int( time.mktime( requestContext['endTime'].timetuple() ) )
matching_nodes = STORE.find(pathExpr, startTime, endTime, local=requestContext['localOnly'])
fetches = [(node, node.fetch(startTime, endTime)) for node in matching_nodes if node.is_leaf]
for node, results in fetches:
if isinstance(results, FetchInProgress):
results = results.waitForResults()
if not results:
log.info("render.datalib.fetchData :: no results for %s.fetch(%s, %s)" % (node, startTime, endTime))
continue
(timeInfo, values) = results
(start, end, step) = timeInfo
series = TimeSeries(node.path, start, end, step, values)
series.pathExpression = pathExpr #hack to pass expressions through to render functions
seriesList.append(series)
# Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
names = set([ series.name for series in seriesList ])
for name in names:
series_with_duplicate_names = [ series for series in seriesList if series.name == name ]
empty_duplicates = [ series for series in series_with_duplicate_names if not nonempty(series) ]
if series_with_duplicate_names == empty_duplicates and len(empty_duplicates) > 0: # if they're all empty
empty_duplicates.pop() # make sure we leave one in seriesList
for series in empty_duplicates:
seriesList.remove(series)
return seriesList
def nonempty(series):
for value in series:
if value is not None:
return True
return False
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/gunicorn/app/wsgiapp.py
|
14
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
from gunicorn.errors import ConfigError
from gunicorn.app.base import Application
from gunicorn import util
class WSGIApplication(Application):
def init(self, parser, opts, args):
if opts.paste and opts.paste is not None:
app_name = 'main'
path = opts.paste
if '#' in path:
path, app_name = path.split('#')
path = os.path.abspath(os.path.normpath(
os.path.join(util.getcwd(), path)))
if not os.path.exists(path):
raise ConfigError("%r not found" % path)
# paste application, load the config
self.cfgurl = 'config:%s#%s' % (path, app_name)
self.relpath = os.path.dirname(path)
from .pasterapp import paste_config
return paste_config(self.cfg, self.cfgurl, self.relpath)
if len(args) < 1:
parser.error("No application module specified.")
self.cfg.set("default_proc_name", args[0])
self.app_uri = args[0]
def chdir(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
# add the path to sys.path
sys.path.insert(0, self.cfg.chdir)
def load_wsgiapp(self):
self.chdir()
# load the app
return util.import_app(self.app_uri)
def load_pasteapp(self):
self.chdir()
# load the paste app
from .pasterapp import load_pasteapp
return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.cfg.paste_global_conf)
def load(self):
if self.cfg.paste is not None:
return self.load_pasteapp()
else:
return self.load_wsgiapp()
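# Typical invocations handled by this application class (illustrative; the
# module path and ini file names below are hypothetical):
#   gunicorn myproject.wsgi:application
#   gunicorn --paste production.ini#main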
def run():
"""\
The ``gunicorn`` command line runner for launching Gunicorn with
generic WSGI applications.
"""
from gunicorn.app.wsgiapp import WSGIApplication
WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
if __name__ == '__main__':
run()
|
codrut3/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
|
21
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self,
command_sequence,
sess,
dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_sequence: (list of list of str) A list of command arguments,
        including the command prefix; each element of the list is a command, e.g.:
["run", "-n"],
["print_feed", "input:0"].
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_sequence = command_sequence
self._command_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
"print_feed_responses": [],
"profiler_py_graphs": [],
"profiler_run_metadata": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self.observers["profiler_py_graphs"].append(py_graph)
self.observers["profiler_run_metadata"].append(run_metadata)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
readline_cli = ui_factory.get_ui("readline")
self._register_this_run_info(readline_cli)
while True:
command = self._command_sequence[self._command_pointer]
self._command_pointer += 1
try:
if command[0] == "run":
self._run_handler(command[1:])
elif command[0] == "print_feed":
self.observers["print_feed_responses"].append(
self._print_feed_handler(command[1:]))
else:
raise ValueError("Unrecognized command prefix: %s" % command[0])
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
self.v = variables.Variable(10.0, name="v")
self.w = variables.Variable(21.0, name="w")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.w_int = control_flow_ops.with_dependencies(
[self.inc_v],
math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
name="w_int_outer")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sparse_ph = array_ops.sparse_placeholder(
dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)
self.sess = session.Session()
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsWithEmptyStringDumpRootWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root="")
# run under debug mode.
wrapped_sess.run(self.inc_v)
self.assertAllClose(11.0, self.sess.run(self.v))
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run", "-n"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunningWithSparsePlaceholderFeedWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
sparse_feed = ([[0, 1], [0, 2]], [10.0, 20.0])
sparse_result = wrapped_sess.run(
self.sparse_add, feed_dict={self.sparse_ph: sparse_feed})
self.assertAllEqual([[0, 1], [0, 2]], sparse_result.indices)
self.assertAllClose([20.0, 40.0], sparse_result.values)
def testRunsUnderNonDebugThenDebugMode(self):
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-t", "2"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testDebuggingMakeCallableTensorRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.Variable(42)
tensor_runner = wrapped_sess.make_callable(v)
self.sess.run(v.initializer)
self.assertAllClose(42, tensor_runner())
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableTensorRunnerWithCustomRunOptionsWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
a = constant_op.constant(42)
tensor_runner = wrapped_sess.make_callable(a)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(
42, tensor_runner(options=run_options, run_metadata=run_metadata))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testDebuggingMakeCallableOperationRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.Variable(10.0)
inc_v = state_ops.assign_add(v, 1.0)
op_runner = wrapped_sess.make_callable(inc_v.op)
self.sess.run(v.initializer)
op_runner()
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual(11.0, self.sess.run(v))
def testDebuggingMakeCallableRunnerWithFeedListWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
ph1 = array_ops.placeholder(dtypes.float32)
ph2 = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph1, ph2)
tensor_runner = wrapped_sess.make_callable(a, feed_list=[ph1, ph2])
self.assertAllClose(42.0, tensor_runner(41.0, 1.0))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
    # Do a run that should lead to a TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRuntimeErrorBeforeGraphExecutionIsRaised(self):
# Use an impossible device name to cause an error before graph execution.
with ops.device("/device:GPU:1337"):
w = variables.Variable([1.0] * 10, name="w")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root=self._tmp_dir)
with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
wrapped_sess.run(w)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve"],
["run", "-f", "v_greater_than_twelve"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
# Verify that adding the same tensor filter more than once is tolerated
# (i.e., as if it were added only once).
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunTillFilterPassesWorksInConjunctionWithOtherNodeNameFilter(self):
"""Test that --.*_filter flags work in conjunction with -f.
In other words, test that you can use a tensor filter on a subset of
the tensors.
"""
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run", "-f", "v_greater_than_twelve", "--node_name_filter", "v$"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
debug_dumps = wrapped_sess.observers["debug_dumps"]
self.assertEqual(2, len(debug_dumps))
self.assertEqual(1, len(debug_dumps[0].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[0].dumped_tensor_data[0].tensor_name)
self.assertEqual(1, len(debug_dumps[1].dumped_tensor_data))
self.assertEqual("v:0", debug_dumps[1].dumped_tensor_data[0].tensor_name)
def testRunsUnderDebugModeWithWatchFnFilteringNodeNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "inc.*"],
["run", "--node_name_filter", "delta"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringOpTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "delta"],
["run", "--op_type_filter", "AssignAdd"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Variable.*"],
["run", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(2, dumps.size)
self.assertItemsEqual(
["v", "w"], [dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(2, dumps.size)
self.assertEqual(
["w_int_inner", "w_int_outer"],
[dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
def testRunsUnderDebugModeWithWatchFnFilteringOpTypesAndTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Cast", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("w_int_inner", dumps.dumped_tensor_data[0].node_name)
def testPrintFeedPrintsFeedValueForTensorFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsFeedValueForTensorNameFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsErrorForInvalidFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run does not contain the key "
"spam"], print_feed_responses[0].lines)
def testPrintFeedPrintsErrorWhenFeedDictIsNone(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run is None or empty."],
print_feed_responses[0].lines)
def testRunUnderProfilerModeWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-p"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
self.assertEqual(1, len(wrapped_sess.observers["profiler_run_metadata"]))
self.assertTrue(
wrapped_sess.observers["profiler_run_metadata"][0].step_stats)
self.assertEqual(1, len(wrapped_sess.observers["profiler_py_graphs"]))
self.assertIsInstance(
wrapped_sess.observers["profiler_py_graphs"][0], ops.Graph)
def testCallingHookDelBeforeAnyRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
del wrapped_sess
def testCallingShouldStopMethodOnNonWrappedNonMonitoredSessionErrors(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
with self.assertRaisesRegexp(
ValueError,
r"The wrapped session .* does not have a method .*should_stop.*"):
wrapped_sess.should_stop()
def testLocalCLIDebugWrapperSessionWorksOnMonitoredSession(self):
monitored_sess = monitored_session.MonitoredSession()
wrapped_monitored_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], monitored_sess)
self.assertFalse(wrapped_monitored_sess.should_stop())
if __name__ == "__main__":
googletest.main()
|
eestay/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/enrollment.py
|
17
|
"""
Enrollment operations for use by instructor APIs.
Does not include any access control; be sure to check access before calling.
"""
import json
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.utils.translation import override as override_language
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from courseware.models import StudentModule
from edxmako.shortcuts import render_to_string
from lang_pref import LANGUAGE_KEY
from submissions import api as sub_api # installed from the edx-submissions repository
from student.models import anonymous_id_for_user
from openedx.core.djangoapps.user_api.models import UserPreference
from microsite_configuration import microsite
class EmailEnrollmentState(object):
""" Store the complete enrollment state of an email in a class """
def __init__(self, course_id, email):
exists_user = User.objects.filter(email=email).exists()
if exists_user:
user = User.objects.get(email=email)
mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)
# is_active is `None` if the user is not enrolled in the course
exists_ce = is_active is not None and is_active
full_name = user.profile.name
else:
mode = None
exists_ce = False
full_name = None
ceas = CourseEnrollmentAllowed.objects.filter(course_id=course_id, email=email).all()
exists_allowed = ceas.exists()
state_auto_enroll = exists_allowed and ceas[0].auto_enroll
self.user = exists_user
self.enrollment = exists_ce
self.allowed = exists_allowed
self.auto_enroll = bool(state_auto_enroll)
self.full_name = full_name
self.mode = mode
def __repr__(self):
return "{}(user={}, enrollment={}, allowed={}, auto_enroll={})".format(
self.__class__.__name__,
self.user,
self.enrollment,
self.allowed,
self.auto_enroll,
)
def to_dict(self):
"""
example: {
'user': False,
'enrollment': False,
'allowed': True,
'auto_enroll': True,
}
"""
return {
'user': self.user,
'enrollment': self.enrollment,
'allowed': self.allowed,
'auto_enroll': self.auto_enroll,
}
def get_user_email_language(user):
"""
Return the language most appropriate for writing emails to user. Returns
None if the preference has not been set, or if the user does not exist.
"""
# Calling UserPreference directly instead of get_user_preference because the user requesting the
# information is not "user" and also may not have is_staff access.
return UserPreference.get_value(user, LANGUAGE_KEY)
def enroll_email(course_id, student_email, auto_enroll=False, email_students=False, email_params=None, language=None):
"""
Enroll a student by email.
    `student_email` is the student's email, e.g. "foo@bar.com"
`auto_enroll` determines what is put in CourseEnrollmentAllowed.auto_enroll
    if auto_enroll is set, then when a user registers with that email address,
    they will be enrolled in the course automatically.
`email_students` determines if student should be notified of action by email.
`email_params` parameters used while parsing email templates (a `dict`).
`language` is the language used to render the email.
returns two EmailEnrollmentState's
representing state before and after the action.
"""
previous_state = EmailEnrollmentState(course_id, student_email)
if previous_state.user:
# if the student is currently unenrolled, don't enroll them in their
# previous mode
course_mode = u"honor"
if previous_state.enrollment:
course_mode = previous_state.mode
CourseEnrollment.enroll_by_email(student_email, course_id, course_mode)
if email_students:
email_params['message'] = 'enrolled_enroll'
email_params['email_address'] = student_email
email_params['full_name'] = previous_state.full_name
send_mail_to_student(student_email, email_params, language=language)
else:
cea, _ = CourseEnrollmentAllowed.objects.get_or_create(course_id=course_id, email=student_email)
cea.auto_enroll = auto_enroll
cea.save()
if email_students:
email_params['message'] = 'allowed_enroll'
email_params['email_address'] = student_email
send_mail_to_student(student_email, email_params, language=language)
after_state = EmailEnrollmentState(course_id, student_email)
return previous_state, after_state
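# Hypothetical call (illustrative only); `email_params` would normally be built
# by get_email_params() defined further below:
#   before, after = enroll_email(
#       course_id, "learner@example.com",
#       auto_enroll=True, email_students=True,
#       email_params=get_email_params(course, True), language="en")
#   if not before.user:
#       # the address was only recorded in CourseEnrollmentAllowed, not enrolled
#       pass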
def unenroll_email(course_id, student_email, email_students=False, email_params=None, language=None):
"""
Unenroll a student by email.
    `student_email` is the student's email, e.g. "foo@bar.com"
`email_students` determines if student should be notified of action by email.
`email_params` parameters used while parsing email templates (a `dict`).
`language` is the language used to render the email.
returns two EmailEnrollmentState's
representing state before and after the action.
"""
previous_state = EmailEnrollmentState(course_id, student_email)
if previous_state.enrollment:
CourseEnrollment.unenroll_by_email(student_email, course_id)
if email_students:
email_params['message'] = 'enrolled_unenroll'
email_params['email_address'] = student_email
email_params['full_name'] = previous_state.full_name
send_mail_to_student(student_email, email_params, language=language)
if previous_state.allowed:
CourseEnrollmentAllowed.objects.get(course_id=course_id, email=student_email).delete()
if email_students:
email_params['message'] = 'allowed_unenroll'
email_params['email_address'] = student_email
# Since no User object exists for this student there is no "full_name" available.
send_mail_to_student(student_email, email_params, language=language)
after_state = EmailEnrollmentState(course_id, student_email)
return previous_state, after_state
def send_beta_role_email(action, user, email_params):
"""
Send an email to a user added or removed as a beta tester.
`action` is one of 'add' or 'remove'
`user` is the User affected
`email_params` parameters used while parsing email templates (a `dict`).
"""
if action == 'add':
email_params['message'] = 'add_beta_tester'
email_params['email_address'] = user.email
email_params['full_name'] = user.profile.name
elif action == 'remove':
email_params['message'] = 'remove_beta_tester'
email_params['email_address'] = user.email
email_params['full_name'] = user.profile.name
else:
raise ValueError("Unexpected action received '{}' - expected 'add' or 'remove'".format(action))
send_mail_to_student(user.email, email_params, language=get_user_email_language(user))
def reset_student_attempts(course_id, student, module_state_key, delete_module=False):
"""
Reset student attempts for a problem. Optionally deletes all student state for the specified problem.
In the previous instructor dashboard it was possible to modify/delete
modules that were not problems. That has been disabled for safety.
`student` is a User
`module_state_key` is the usage key of the problem to reset, built by combining
the problem name (e.g. 'L2Node1') with 'problem/' and course information.
Raises:
ValueError: `problem_state` is invalid JSON.
StudentModule.DoesNotExist: could not load the student module.
submissions.SubmissionError: unexpected error occurred while resetting the score in the submissions API.
"""
# Reset the student's score in the submissions API
# Currently this is used only by open assessment (ORA 2)
# We need to do this *before* retrieving the `StudentModule` model,
# because it's possible for a score to exist even if no student module exists.
if delete_module:
sub_api.reset_score(
anonymous_id_for_user(student, course_id),
course_id.to_deprecated_string(),
module_state_key.to_deprecated_string(),
)
module_to_reset = StudentModule.objects.get(
student_id=student.id,
course_id=course_id,
module_state_key=module_state_key
)
if delete_module:
module_to_reset.delete()
else:
_reset_module_attempts(module_to_reset)
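# Illustrative sketch (not part of the original module): resetting attempts for a
# single learner might look like this, assuming `course_key` and `usage_key` are
# valid opaque keys and `student` is a User:
#
#   reset_student_attempts(course_key, student, usage_key, delete_module=False)
#   # With delete_module=True the StudentModule row (and any submissions API
#   # score) is removed instead of just zeroing the "attempts" counter.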
def _reset_module_attempts(studentmodule):
"""
Reset the number of attempts on a studentmodule.
Throws ValueError if `problem_state` is invalid JSON.
"""
# load the state json
problem_state = json.loads(studentmodule.state)
# old_number_of_attempts = problem_state["attempts"]
problem_state["attempts"] = 0
# save
studentmodule.state = json.dumps(problem_state)
studentmodule.save()
def get_email_params(course, auto_enroll, secure=True):
"""
Generate parameters used when parsing email templates.
`auto_enroll` is a flag for auto-enrolling non-registered students (a `boolean`)
Returns a dict of parameters
"""
protocol = 'https' if secure else 'http'
stripped_site_name = microsite.get_value(
'SITE_NAME',
settings.SITE_NAME
)
# TODO: Use request.build_absolute_uri rather than '{proto}://{site}{path}'.format
# and check with the Services team that this works well with microsites
registration_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('register_user')
)
course_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('course_root', kwargs={'course_id': course.id.to_deprecated_string()})
)
# We can't get the url to the course's About page if the marketing site is enabled.
course_about_url = None
if not settings.FEATURES.get('ENABLE_MKTG_SITE', False):
course_about_url = u'{proto}://{site}{path}'.format(
proto=protocol,
site=stripped_site_name,
path=reverse('about_course', kwargs={'course_id': course.id.to_deprecated_string()})
)
is_shib_course = uses_shib(course)
# Composition of email
email_params = {
'site_name': stripped_site_name,
'registration_url': registration_url,
'course': course,
'auto_enroll': auto_enroll,
'course_url': course_url,
'course_about_url': course_about_url,
'is_shib_course': is_shib_course,
}
return email_params
def send_mail_to_student(student, param_dict, language=None):
"""
Construct the email using templates and then send it.
`student` is the student's email address (a `str`),
`param_dict` is a `dict` with keys
[
`site_name`: name given to edX instance (a `str`)
`registration_url`: url for registration (a `str`)
`course_id`: id of course (a `str`)
`auto_enroll`: user input option (a `str`)
`course_url`: url of course (a `str`)
`email_address`: email of student (a `str`)
`full_name`: student full name (a `str`)
`message`: type of email to send and template to use (a `str`)
`is_shib_course`: (a `boolean`)
]
`language` is the language used to render the email. If None the language
of the currently-logged in user (that is, the user sending the email) will
be used.
The mail is sent with ``fail_silently=False``, so a delivery failure raises an
exception rather than being signalled through a return value.
"""
# add some helpers and microsite configuration substitutions
if 'course' in param_dict:
param_dict['course_name'] = param_dict['course'].display_name_with_default
param_dict['site_name'] = microsite.get_value(
'SITE_NAME',
param_dict['site_name']
)
subject = None
message = None
# if we are running in a microsite and an email template definition is
# available as configuration, render that template
message_type = param_dict['message']
email_template_dict = {
'allowed_enroll': (
'emails/enroll_email_allowedsubject.txt',
'emails/enroll_email_allowedmessage.txt'
),
'enrolled_enroll': (
'emails/enroll_email_enrolledsubject.txt',
'emails/enroll_email_enrolledmessage.txt'
),
'allowed_unenroll': (
'emails/unenroll_email_subject.txt',
'emails/unenroll_email_allowedmessage.txt'
),
'enrolled_unenroll': (
'emails/unenroll_email_subject.txt',
'emails/unenroll_email_enrolledmessage.txt'
),
'add_beta_tester': (
'emails/add_beta_tester_email_subject.txt',
'emails/add_beta_tester_email_message.txt'
),
'remove_beta_tester': (
'emails/remove_beta_tester_email_subject.txt',
'emails/remove_beta_tester_email_message.txt'
),
'account_creation_and_enrollment': (
'emails/enroll_email_enrolledsubject.txt',
'emails/account_creation_and_enroll_emailMessage.txt'
),
}
subject_template, message_template = email_template_dict.get(message_type, (None, None))
if subject_template is not None and message_template is not None:
subject, message = render_message_to_string(
subject_template, message_template, param_dict, language=language
)
if subject and message:
# Remove leading and trailing whitespace from body
message = message.strip()
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
send_mail(subject, message, from_address, [student], fail_silently=False)
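# Illustrative sketch (not part of the original module): the param_dict passed to
# send_mail_to_student is normally produced by get_email_params() and then
# augmented by the caller; values below are hypothetical:
#
#   email_params = get_email_params(course, auto_enroll=True)
#   email_params['message'] = 'enrolled_enroll'
#   email_params['email_address'] = 'student@example.com'
#   email_params['full_name'] = 'Jane Doe'
#   send_mail_to_student('student@example.com', email_params)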
def render_message_to_string(subject_template, message_template, param_dict, language=None):
"""
Render a mail subject and message templates using the parameters from
param_dict and the given language. If language is None, the platform
default language is used.
Returns two strings that correspond to the rendered, translated email
subject and message.
"""
with override_language(language):
return get_subject_and_message(subject_template, message_template, param_dict)
def get_subject_and_message(subject_template, message_template, param_dict):
"""
Return the rendered subject and message with the appropriate parameters.
"""
subject = render_to_string(subject_template, param_dict)
message = render_to_string(message_template, param_dict)
return subject, message
def uses_shib(course):
"""
Return a boolean indicating whether the course uses Shibboleth as its
enrollment domain, i.e. whether Shibboleth authentication is set for this course.
"""
return course.enrollment_domain and course.enrollment_domain.startswith(settings.SHIBBOLETH_DOMAIN_PREFIX)
|
relman/sevpn-mgmt-py
|
refs/heads/master
|
SevpnMgmtPy/admin_api/rpc_test.py
|
1
|
# -*- coding: utf-8 -*-
class RpcTest:
def __init__(self, int_val=0, int64_val=0L, str_val='', unistr_val=u''):
self.int_val = int_val
self.int64_val = int64_val
self.str_val = str_val
self.unistr_val = unistr_val
def in_rpc_test(self, pack):
if pack is None:
return
self.int_val = pack.get_value("IntValue")
self.int64_val = pack.get_value("Int64Value")
self.str_val = pack.get_value("StrValue")
self.unistr_val = pack.get_value("UniStrValue")
def out_rpc_test(self, pack):
if pack is None:
return
pack.add_value("IntValue", self.int_val)
pack.add_value("Int64Value", self.int64_val)
pack.add_value("StrValue", self.str_val)
pack.add_value("UniStrValue", self.unistr_val)
|
brijeshkesariya/odoo
|
refs/heads/8.0
|
addons/warning/warning.py
|
243
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sales Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
class sale_order(osv.osv):
_inherit = 'sale.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'payment_term' : False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.sale_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if partner.sale_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value':{'partner_address_id': False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part, context=context)
if partner.purchase_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
if partner.purchase_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False,
partner_bank_id=False, company_id=False,
context=None):
if not partner_id:
return {'value': {
'account_id': False,
'payment_term': False,
}
}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.invoice_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.invoice_warn_msg
warning = {
'title': title,
'message': message
}
if partner.invoice_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice=date_invoice, payment_term=payment_term,
partner_bank_id=partner_bank_id, company_id=company_id, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = {'value': {}}
if warning:
result['warning'] = warning
return result
class product_product(osv.osv):
_inherit = 'product.template'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, warehouse_id=False, context=None):
warning = {}
if not product:
return {'value': {'th_weight' : 0, 'product_packaging': False,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.sale_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(sale_order_line, self).product_id_change_with_wh( cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id=warehouse_id, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
warning = {}
if not product:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom or False}, 'domain':{'product_uom':[]}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.purchase_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.purchase_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned, name=name, price_unit=price_unit, state=state, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
LittlePeng/redis-monitor
|
refs/heads/master
|
src/api/controller/MemoryController.py
|
5
|
from BaseController import BaseController
import tornado.ioloop
import tornado.web
import dateutil.parser
import datetime
class MemoryController(BaseController):
def get(self):
server = self.get_argument("server")
from_date = self.get_argument("from", None)
to_date = self.get_argument("to", None)
return_data = dict(data=[],
timestamp=datetime.datetime.now().isoformat())
if from_date is None or to_date is None:
end = datetime.datetime.now()
delta = datetime.timedelta(seconds=60)
start = end - delta
else:
start = dateutil.parser.parse(from_date)
end = dateutil.parser.parse(to_date)
combined_data = []
# TODO: These variables aren't currently used; should they be removed?
prev_max=0
prev_current=0
counter=0
for data in self.stats_provider.get_memory_info(server, start, end):
combined_data.append([data[0], data[1], data[2]])
for data in combined_data:
d = [self.datetime_to_list(data[0]), data[1], data[2]]
return_data['data'].append(d)
self.write(return_data)
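# Illustrative note (not part of the original handler): the handler expects a
# required "server" query argument plus optional "from"/"to" bounds as parseable
# date strings (e.g. ISO-8601). When the bounds are omitted it falls back to the
# last 60 seconds, and it responds with {"data": [...], "timestamp": "..."}.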
|
storm-computers/odoo
|
refs/heads/9.0
|
addons/l10n_fr/l10n_fr.py
|
45
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'siret': fields.char('SIRET', size=14),
'ape': fields.char('APE'),
}
|
sarvex/django
|
refs/heads/master
|
django/contrib/gis/geos/prototypes/misc.py
|
483
|
"""
This module is for the miscellaneous GEOS routines, particularly the
ones that return the area, distance, and length.
"""
from ctypes import POINTER, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import check_dbl, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.utils.six.moves import range
__all__ = ['geos_area', 'geos_distance', 'geos_length', 'geos_isvalidreason']
class DblFromGeom(GEOSFuncFactory):
"""
Takes a Geometry argument; the double result is passed back by reference
as the last argument, while the C function itself returns a status code.
"""
restype = c_int # Status code returned
errcheck = staticmethod(check_dbl)
def get_func(self, num_geom=1):
argtypes = [GEOM_PTR for i in range(num_geom)]
argtypes += [POINTER(c_double)]
self.argtypes = argtypes
return super(DblFromGeom, self).get_func()
# ### ctypes prototypes ###
# Area, distance, and length prototypes.
geos_area = DblFromGeom('GEOSArea')
geos_distance = DblFromGeom('GEOSDistance', num_geom=2)
geos_length = DblFromGeom('GEOSLength')
geos_isvalidreason = GEOSFuncFactory(
'GEOSisValidReason', restype=geos_char_p, errcheck=check_string, argtypes=[GEOM_PTR]
)
|
brettchien/LeetCode
|
refs/heads/master
|
232_ImplementQueueUsingStacks.py
|
1
|
class Queue:
# initialize your data structure here.
def __init__(self):
self.stack = []
def size(self):
return len(self.stack)
# @param x, an integer
# @return nothing
def push(self, x):
# push on stack
self.stack.append(x)
# @return nothing
def pop(self):
# pop all elements to another stack
if self.size() == 0:
return None
tmp = []
length = self.size()
for i in range(length - 1):
tmp.append(self.stack.pop(-1))
ans = self.stack.pop(-1)
for i in range(length - 1):
self.stack.append(tmp.pop(-1))
return ans
# @return an integer
def peek(self):
if self.size() == 0:
return None
tmp = []
length = self.size()
for i in range(length - 1):
tmp.append(self.stack.pop(-1))
ans = self.stack.pop(-1)
self.stack.append(ans)
for i in range(length - 1):
self.stack.append(tmp.pop(-1))
return ans
# @return a boolean
def empty(self):
return self.size() == 0
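# Illustrative usage sketch (not part of the original solution): the stack is
# rotated on every pop/peek, so those calls are O(n) while push stays O(1).
#
#   q = Queue()
#   q.push(1); q.push(2); q.push(3)
#   assert q.peek() == 1
#   assert q.pop() == 1
#   assert not q.empty()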
|
clumsy/intellij-community
|
refs/heads/master
|
python/testData/selectWord/slice/after2.py
|
83
|
"hello world again"[<selection>10:12</selection>]
|
gward/buildbot
|
refs/heads/master
|
contrib/hg_buildbot.py
|
4
|
#! /usr/bin/python
# This is a script which delivers Change events from Mercurial to the
# buildmaster each time a changeset is pushed into a repository. Add it to
# the 'incoming' commit hook on your canonical "central" repository, by
# putting something like the following in the .hg/hgrc file of that
# repository:
#
# [hooks]
# incoming.buildbot = /PATH/TO/hg_buildbot.py BUILDMASTER:PORT
#
# Note that both Buildbot and Mercurial must be installed on the repository
# machine.
import os
import sys
import commands
from StringIO import StringIO
from buildbot.scripts import runner
MASTER = sys.argv[1]
CHANGESET_ID = os.environ["HG_NODE"]
# TODO: consider doing 'import mercurial.hg' and extract this information
# using the native python
out = commands.getoutput(
"hg log -r %s --template '{author}\n{files}\n{desc}'" % CHANGESET_ID)
s = StringIO(out)
user = s.readline().strip()
# NOTE: this fails when filenames contain spaces. I cannot find a way to get
# hg to use some other filename separator.
files = s.readline().strip().split()
comments = "".join(s.readlines())
change = {
'master': MASTER,
# note: this is more likely to be a full email address, which would make
# the left-hand "Changes" column kind of wide. The buildmaster should
# probably be improved to display an abbreviation of the username.
'username': user,
'revision': CHANGESET_ID,
'comments': comments,
'files': files,
}
runner.sendchange(change, True)
|
hrishioa/Aviato
|
refs/heads/master
|
flask/Lib/site-packages/shapely/examples/__init__.py
|
25
|
# Examples module
|
gr8linux/AdminScripts
|
refs/heads/master
|
cidr_mrg.py
|
1
|
from netaddr import *
import sys
DEBUG = False
def debugger(msg):
if DEBUG:
print msg
if len(sys.argv) < 2:
print "Use "+sys.argv[0]+" IP_LIST.TXT\n"
print " IP list file format should be \n"
print " a.b.c.d\n"
print " or\n"
print " a.b.c.d/w.x.y.z"
exit()
filename = sys.argv[1]
debugger ("Opening "+filename )
iplist = []
with open(filename,'r') as ipfile:
mylist = ipfile.read().splitlines()
debugger(mylist)
for rl in mylist:
# rl = ipfile.readline()
if(rl):
#rl.strip()
#rl = rl[:-1]
debugger(rl)
debugger(repr(rl))
try:
iplist.append(IPNetwork(rl))
except AddrFormatError:
print "Address format is not defined"
else:
break
if(ipfile):
debugger(iplist)
cidrs = cidr_merge(iplist)
for cid in cidrs:
print cid
ipfile.close()
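# Illustrative sketch (not part of the original script): given an input file
# containing the lines
#
#   192.168.0.0/24
#   192.168.1.0/24
#
# netaddr.cidr_merge() collapses the adjacent networks and the script prints a
# single summarized prefix:
#
#   192.168.0.0/23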
|
SequencingDOTcom/App-Market-API-integration
|
refs/heads/master
|
python/dev/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
SebDieBln/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/gdal/rasterize_over.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
rasterize_over.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.vector import ogrConnectionString, ogrLayerName
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class rasterize_over(GdalAlgorithm):
INPUT = 'INPUT'
INPUT_RASTER = 'INPUT_RASTER'
FIELD = 'FIELD'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'rasterize.png'))
def commandLineName(self):
return "gdalogr:rasterize_over"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Rasterize (write over existing raster)')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Conversion')
self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Attribute field'), self.INPUT))
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Existing raster layer'), False))
def getConsoleCommands(self):
inLayer = self.getParameterValue(self.INPUT)
ogrLayer = ogrConnectionString(inLayer)[1:-1]
inRasterLayer = self.getParameterValue(self.INPUT_RASTER)
ogrRasterLayer = ogrConnectionString(inRasterLayer)[1:-1]
arguments = []
arguments.append('-a')
arguments.append(unicode(self.getParameterValue(self.FIELD)))
arguments.append('-l')
arguments.append(ogrLayerName(inLayer))
arguments.append(ogrLayer)
arguments.append(ogrRasterLayer)
return ['gdal_rasterize', GdalUtils.escapeAndJoin(arguments)]
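# Illustrative note (not part of the original algorithm): for a hypothetical input
# layer "parcels" with attribute field "value" and an existing raster, the command
# assembled above is roughly:
#
#   gdal_rasterize -a value -l parcels parcels.shp existing.tif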
|
beni55/sympy
|
refs/heads/master
|
sympy/core/basic.py
|
4
|
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from .assumptions import ManagedProperties
from .cache import cacheit
from .core import BasicType, C
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest)
from .decorators import deprecated
from .singleton import S
from inspect import getmro
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'hermitian': True,
'imaginary': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True,
'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([ inner_key(arg) for arg in args ])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(self, UndefFunc) and isinstance(other, UndefFunc):
if self.class_key() == other.class_key():
return True
else:
return False
if type(self) is not type(other):
# issue 6100 a**1.0 == a like a**2.0 == a**2
if isinstance(self, C.Pow) and self.exp == 1:
return self.base == other
if isinstance(other, C.Pow) and other.exp == 1:
return self == other.base
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other
if isinstance(self, AppliedUndef) and isinstance(other,
AppliedUndef):
if self.class_key() != other.class_key():
return False
elif type(self) is not type(other):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""a != b -> Compare two symbolic trees and see whether they are different
this is the same as:
a.compare(b) != 0
but faster
"""
return not self.__eq__(other)
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
dummy_symbols = [ s for s in self.free_symbols if s.is_Dummy ]
if not dummy_symbols:
return self == other
elif len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
raise ValueError(
"only one dummy symbol allowed on the left-hand side")
if symbol is None:
symbols = other.free_symbols
if not symbols:
return self == other
elif len(symbols) == 1:
symbol = symbols.pop()
else:
raise ValueError("specify a symbol in which expressions should be compared")
tmp = dummy.__class__()
return self.subs(dummy, tmp) == other.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
set([1, 2, I, pi, x, y])
If one or more types are given, the results will contain only
those types of atoms.
Examples
========
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
set([x, y])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
set([1, 2])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
set([1, 2, pi])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
set([1, 2, I, pi])
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
set([x, y])
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
set([1])
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
set([1, 2])
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
set([f(x), sin(y + I*pi)])
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
set([f(x)])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
set([I*pi, 2*sin(y + I*pi)])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all symbols
except those. Derivative keeps track of symbols with respect to which it
will perform a derivative; those are bound variables, too, so it has
its own symbols method.
Any other method that uses bound variables should implement a symbols
method."""
return set().union(*[a.free_symbols for a in self.args])
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.variables`` as underscore-suffixed numbers
corresponding to their position in ``self.variables``. Enough
underscores are added to ensure that there will be no clash with
existing free symbols.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: 0_}
"""
if not hasattr(self, 'variables'):
return {}
u = "_"
while any(s.name.endswith(u) for s in self.free_symbols):
u += "_"
name = '%%i%s' % u
V = self.variables
return dict(list(zip(V, [C.Symbol(name % i, **v.assumptions0)
for i, v in enumerate(V)])))
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x,y,z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, C.Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
"""
is_real = self.is_real
if is_real is False:
return False
is_number = self.is_number
if is_number is False:
return False
if is_real and is_number:
return True
n, i = [p.evalf(2) for p in self.as_real_imag()]
if not i.is_Number or not n.is_Number:
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
@deprecated(useinstead="iter(self.args)", issue=7717, deprecated_since_version="0.7.6")
def iter_basic_args(self):
"""
Iterates arguments of ``self``.
"""
return iter(self.args)
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See docstring of Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A,B,C,D,E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, dict)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i in range(len(sequence)):
o, n = sequence[i]
so, sn = sympify(o), sympify(n)
if not isinstance(so, Basic):
if type(o) is str:
so = C.Symbol(o)
sequence[i] = (so, sn)
if _aresame(so, sn):
sequence[i] = None
continue
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = C.Dummy()
for old, new in sequence:
d = C.Dummy(commutative=new.is_commutative)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also: _subs
"""
return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x:pi, y:2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) #doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
if self in rule:
return rule[self]
elif rule:
args = []
for a in self.args:
try:
args.append(a.xreplace(rule))
except AttributeError:
args.append(a)
args = tuple(args)
if not _aresame(args, self.args):
return self.func(*args)
return self
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicType):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
try:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
except AttributeError:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return self.__eq__
def replace(self, query, value, map=False, simultaneous=True, exact=False):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will only
succeed if non-zero values are received for each Wild that appears in
the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this expression be changed during rebuilding
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy(commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
rv = rv.xreplace(r)
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = dict([(k.xreplace(r), v.xreplace(r))
for k, v in mapping.items()])
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
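# Illustrative usage sketch (assumed example, not from the original source):
# a query passed to ``find``/``count`` may be a type, an expression, or a
# callable (see ``_make_find_query`` further below).
#
#     >>> from sympy import sin
#     >>> from sympy.abc import x, y
#     >>> e = sin(x) + sin(x)*sin(y)
#     >>> e.find(sin) == set([sin(x), sin(y)])   # query by type
#     True
#     >>> e.count(sin(x))                        # query by expression
#     2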
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
from sympy import signsimp
pattern = sympify(pattern)
s = signsimp(self)
p = signsimp(pattern)
# if we still have the same relationship between the types of
# input, then use the sign simplified forms
if (pattern.func == self.func) and (s.func == p.func):
rv = p.matches(s, old=old)
else:
rv = pattern.matches(self, old=old)
return rv
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some kinds of objects were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep = False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [ term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args ]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [ a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args ]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args)
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
Rewrites expression containing applications of functions
of one kind in terms of functions of different kind. For
example you can rewrite trigonometric functions as complex
exponentials or combinatorial functions as gamma function.
As a pattern this function accepts a list of functions to
rewrite (instances of DefinedFunction class). As the rule you
can use a string or a destination function instance (in the
latter case rewrite() will use the function's name).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined, called 'deep'. When 'deep' is set to False, functions
are not allowed to rewrite their arguments.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [ p.__class__ for p in pattern if self.has(p) ]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
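# Illustrative usage sketch (assumed example, not from the original source):
# per ``_eval_rewrite`` above, the 'deep' hint controls whether arguments are
# rewritten too; with deep=False only the outermost matching function is
# rewritten and its arguments are left untouched.
#
#     >>> sin(sin(x)).rewrite(sin, exp, deep=False)   # doctest: +SKIP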
@property
@deprecated(useinstead="is_finite", issue=8071, deprecated_since_version="0.7.6")
def is_bounded(self):
return super(Basic, self).__getattribute__('is_finite')
@property
@deprecated(useinstead="is_infinite", issue=8071, deprecated_since_version="0.7.6")
def is_unbounded(self):
return super(Basic, self).__getattribute__('is_infinite')
@deprecated(useinstead="is_zero", issue=8071, deprecated_since_version="0.7.6")
def is_infinitesimal(self):
return super(Basic, self).__getattribute__('is_zero')
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
from sympy.core import S
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To SymPy, 2.0 == 2:
>>> from sympy import S
>>> 2.0 == S(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .function import AppliedUndef, UndefinedFunction as UndefFunc
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
else:
return True
def _atomic(e):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
set([x, y])
>>> _atomic(x + f(y))
set([x, f(y)])
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
set([y, cos(x), Derivative(f(x), x)])
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
try:
free = e.free_symbols
except AttributeError:
return set([e])
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
fashion. That is, it yields the current node, then descends depth-first,
yielding the full pre-order traversal of each of the node's children in
turn.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
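# Illustrative usage sketch (assumed example, not from the original source):
# ``_make_find_query`` normalizes the three supported query forms into a
# predicate for ``Basic.find`` and ``Basic.count``.  Imports of Symbol, sin
# and Wild from sympy are assumed.
#
#     >>> _make_find_query(Symbol)                  # a type -> isinstance test
#     >>> _make_find_query(sin(Wild('a')))          # an expression -> match test
#     >>> _make_find_query(lambda e: e.is_Number)   # a callable -> used as-is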
|
leopardhs/ns3_sdn
|
refs/heads/master
|
examples/realtime/realtime-udp-echo.py
|
195
|
#
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Network topology
#
# n0 n1 n2 n3
# | | | |
# =================
# LAN
#
# - UDP flows from n0 to n1 and back
# - DropTail queues
# - Tracing of queues and packet receptions to file "udp-echo.tr"
import ns.applications
import ns.core
import ns.csma
import ns.internet
import ns.network
def main(argv):
#
# Allow the user to override any of the defaults and the above Bind() at
# run-time, via command-line arguments
#
cmd = ns.core.CommandLine()
cmd.Parse(argv)
#
# But since this is a realtime script, don't allow the user to mess with
# that.
#
ns.core.GlobalValue.Bind("SimulatorImplementationType", ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
#
# Explicitly create the nodes required by the topology (shown above).
#
print "Create nodes."
n = ns.network.NodeContainer()
n.Create(4)
internet = ns.internet.InternetStackHelper()
internet.Install(n)
#
# Explicitly create the channels required by the topology (shown above).
#
print ("Create channels.")
csma = ns.csma.CsmaHelper()
csma.SetChannelAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate(5000000)))
csma.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2)));
csma.SetDeviceAttribute("Mtu", ns.core.UintegerValue(1400))
d = csma.Install(n)
#
# We've got the "hardware" in place. Now we need to add IP addresses.
#
print ("Assign IP Addresses.")
ipv4 = ns.internet.Ipv4AddressHelper()
ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0"))
i = ipv4.Assign(d)
print ("Create Applications.")
#
# Create a UdpEchoServer application on node one.
#
port = 9 # well-known echo port number
server = ns.applications.UdpEchoServerHelper(port)
apps = server.Install(n.Get(1))
apps.Start(ns.core.Seconds(1.0))
apps.Stop(ns.core.Seconds(10.0))
#
# Create a UdpEchoClient application to send UDP datagrams from node zero to
# node one.
#
packetSize = 1024
maxPacketCount = 500
interPacketInterval = ns.core.Seconds(0.01)
client = ns.applications.UdpEchoClientHelper(i.GetAddress (1), port)
client.SetAttribute("MaxPackets", ns.core.UintegerValue(maxPacketCount))
client.SetAttribute("Interval", ns.core.TimeValue(interPacketInterval))
client.SetAttribute("PacketSize", ns.core.UintegerValue(packetSize))
apps = client.Install(n.Get(0))
apps.Start(ns.core.Seconds(2.0))
apps.Stop(ns.core.Seconds(10.0))
ascii = ns.network.AsciiTraceHelper()
csma.EnableAsciiAll(ascii.CreateFileStream("realtime-udp-echo.tr"))
csma.EnablePcapAll("realtime-udp-echo", False)
#
# Now, do the actual simulation.
#
print ("Run Simulation.")
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
print ("Done.")
if __name__ == '__main__':
import sys
main(sys.argv)
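#
# Illustrative sketch (assumed node/address indices, not part of the original
# script): inside main(), reusing n, i and port, the same helper pattern used
# above could host a second echo server on node two and point another client
# at it.
#
#     server2 = ns.applications.UdpEchoServerHelper(port)
#     apps2 = server2.Install(n.Get(2))
#     apps2.Start(ns.core.Seconds(1.0))
#     apps2.Stop(ns.core.Seconds(10.0))
#     client2 = ns.applications.UdpEchoClientHelper(i.GetAddress(2), port)
#     apps2 = client2.Install(n.Get(3))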
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/distutils/dep_util.py
|
207
|
"""distutils.dep_util
Utility functions for simple, timestamp-based dependency of files
and groups of files; also, functions based entirely on such
timestamp dependency analysis."""
import os
from distutils.errors import DistutilsFileError
def newer (source, target):
"""Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't. Return false if
both exist and 'target' is the same age or younger than 'source'.
Raise DistutilsFileError if 'source' does not exist.
"""
if not os.path.exists(source):
raise DistutilsFileError("file '%s' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return 1
from stat import ST_MTIME
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
# newer ()
def newer_pairwise (sources, targets):
"""Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (sources,
targets) where source is newer than target, according to the semantics
of 'newer()'.
"""
if len(sources) != len(targets):
raise ValueError("'sources' and 'targets' must be same length")
# build a pair of lists (sources, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources)):
if newer(sources[i], targets[i]):
n_sources.append(sources[i])
n_targets.append(targets[i])
return (n_sources, n_targets)
# newer_pairwise ()
def newer_group (sources, target, missing='error'):
"""Return true if 'target' is out-of-date with respect to any file
listed in 'sources'. In other words, if 'target' exists and is newer
than every file in 'sources', return false; otherwise return true.
'missing' controls what we do when a source file is missing; the
default ("error") is to blow up with an OSError from inside 'stat()';
if it is "ignore", we silently drop any missing source files; if it is
"newer", any missing source files make us assume that 'target' is
out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
"""
# If the target doesn't even exist, then it's definitely out-of-date.
if not os.path.exists(target):
return 1
# Otherwise we have to find out the hard way: if *any* source file
# is more recent than 'target', then 'target' is out-of-date and
# we can immediately return true. If we fall through to the end
# of the loop, then 'target' is up-to-date and we return false.
from stat import ST_MTIME
target_mtime = os.stat(target)[ST_MTIME]
for source in sources:
if not os.path.exists(source):
if missing == 'error': # blow up when we stat() the file
pass
elif missing == 'ignore': # missing source dropped from
continue # target's dependency list
elif missing == 'newer': # missing source means target is
return 1 # out-of-date
source_mtime = os.stat(source)[ST_MTIME]
if source_mtime > target_mtime:
return 1
else:
return 0
# newer_group ()
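# Illustrative usage sketch (assumed filenames, not part of the original
# module): deciding whether build products must be regenerated, using the
# functions defined above.
#
#     from distutils.dep_util import newer, newer_group
#
#     if newer('spam.c', 'spam.o'):
#         pass  # recompile: spam.c was modified after spam.o was built
#     if newer_group(['a.c', 'b.c', 'common.h'], 'prog', missing='newer'):
#         pass  # relink: some input is newer than (or missing relative to) prog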
|
jgmize/kuma
|
refs/heads/master
|
kuma/users/tests/test_templates.py
|
1
|
import json
import pytest
import requests_mock
from constance import config as constance_config
from constance.test.utils import override_config
from django.conf import settings
from pyquery import PyQuery as pq
from waffle.models import Flag
from kuma.core.tests import eq_, ok_
from kuma.core.urlresolvers import reverse
from kuma.core.utils import urlparams
from . import UserTestCase
from .test_views import TESTUSER_PASSWORD
def add_persona_verify_response(mock_requests, data):
mock_requests.post(
settings.PERSONA_VERIFIER_URL,
json=data,
headers={
'content_type': 'application/json',
}
)
@requests_mock.mock()
class SignupTests(UserTestCase):
localizing_client = False
def test_signup_page(self, mock_requests):
add_persona_verify_response(mock_requests, {
'status': 'okay',
'email': 'newuser@test.com',
'audience': 'https://developer-local.allizom.org',
})
url = reverse('persona_login')
response = self.client.post(url, follow=True)
self.assertNotContains(response, 'Sign In Failure')
test_strings = ['Create your MDN profile to continue',
'choose a username',
'having trouble',
'I agree',
'to Mozilla',
'Terms',
'Privacy Notice']
for test_string in test_strings:
self.assertContains(response, test_string)
def test_signup_page_disabled(self, mock_requests):
add_persona_verify_response(mock_requests, {
'status': 'okay',
'email': 'newuser@test.com',
'audience': 'https://developer-local.allizom.org',
})
url = reverse('persona_login')
registration_disabled = Flag.objects.create(
name='registration_disabled',
everyone=True
)
response = self.client.post(url, follow=True)
self.assertNotContains(response, 'Sign In Failure')
self.assertContains(response, 'Profile Creation Disabled')
# re-enable registration
registration_disabled.everyone = False
registration_disabled.save()
response = self.client.post(url, follow=True)
test_strings = ['Create your MDN profile to continue',
'choose a username',
'having trouble']
for test_string in test_strings:
self.assertContains(response, test_string)
class AccountEmailTests(UserTestCase):
localizing_client = True
def test_account_email_page_requires_signin(self):
url = reverse('account_email')
response = self.client.get(url, follow=True)
self.assertContains(response, 'Please sign in')
ok_(len(response.redirect_chain) > 0)
def test_account_email_page_single_email(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('account_email')
response = self.client.get(url)
self.assertContains(response, 'is your <em>primary</em> email address')
for test_string in ['Make Primary',
'Re-send Confirmation',
'Remove']:
self.assertNotContains(response, test_string)
def test_account_email_page_multiple_emails(self):
u = self.user_model.objects.get(username='testuser2')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('account_email')
response = self.client.get(url)
for test_string in ['Make Primary',
'Re-send Confirmation',
'Remove',
'Add Email',
'Edit profile']:
self.assertContains(response, test_string)
class SocialAccountConnectionsTests(UserTestCase):
localizing_client = True
def test_account_connections_page_requires_signin(self):
url = reverse('socialaccount_connections')
response = self.client.get(url, follow=True)
self.assertContains(response, 'Please sign in')
ok_(len(response.redirect_chain) > 0)
def test_account_connections_page(self):
u = self.user_model.objects.get(username='testuser')
self.client.login(username=u.username, password=TESTUSER_PASSWORD)
url = reverse('socialaccount_connections')
response = self.client.get(url)
for test_string in ['Disconnect', 'Connect a new account',
'Edit profile', 'Connect with']:
self.assertContains(response, test_string)
class AllauthPersonaTestCase(UserTestCase):
existing_persona_email = 'testuser@test.com'
existing_persona_username = 'testuser'
localizing_client = False
@requests_mock.mock()
def test_persona_auth_failure_copy(self, mock_requests):
"""
The explanatory page for failed Persona auth contains the
failure copy, and does not contain success messages or a form
to choose a username.
"""
add_persona_verify_response(mock_requests, {
'status': 'failure',
'reason': 'this email address has been naughty'
})
response = self.client.post(reverse('persona_login'), follow=True)
for expected_string in ('Account Sign In Failure',
'An error occurred while attempting to sign '
'in with your account.'):
self.assertContains(response, expected_string)
for unexpected_string in (
'Thanks for signing in to MDN with Persona.',
('<form class="submission readable-line-length" method="post" '
'action="/en-US/users/account/signup">'),
('<input name="username" maxlength="30" type="text"'
' autofocus="autofocus" required="required" '
'placeholder="Username" id="id_username" />'),
'<input type="hidden" name="email" value="',
'" id="id_email" />'):
self.assertNotContains(response, unexpected_string)
@requests_mock.mock()
def test_persona_auth_success_copy(self, mock_requests):
"""
Successful Persona auth of a new user displays a success
message and the Persona-specific signup form, correctly
populated, and does not display the failure copy.
"""
persona_signup_email = 'templates_persona_auth_copy@example.com'
add_persona_verify_response(mock_requests, {
'status': 'okay',
'email': persona_signup_email,
})
response = self.client.post(reverse('persona_login'),
follow=True)
for expected_string in (
# Test that we got:
#
# * Persona sign-in success message
#
# * Form with action set to the account-signup URL.
#
# * Username field, blank
#
# * Hidden email address field, pre-populated with the
# address used to authenticate to Persona.
'Thanks for signing in to MDN with Persona.',
('<form class="submission readable-line-length" method="post" '
'action="/en-US/users/account/signup">'),
('<input autofocus="autofocus" id="id_username" '
'maxlength="30" name="username" placeholder="Username" '
'required="required" type="text" />'),
('<input id="id_email" name="email" type="hidden" '
'value="%s" />' % persona_signup_email)):
self.assertContains(response, expected_string)
for unexpected_string in (
'<Account Sign In Failure',
'<An error occurred while attempting to sign '
'in with your account.'):
self.assertNotContains(response, unexpected_string)
@requests_mock.mock()
def test_persona_signin_copy(self, mock_requests):
"""
After an existing user successfully authenticates with
Persona, their username, an indication that Persona was used
to log in, and a logout link appear in the auth tools section
of the page.
"""
add_persona_verify_response(mock_requests, {
'status': 'okay',
'email': self.existing_persona_email,
})
response = self.client.post(reverse('persona_login'), follow=True)
eq_(response.status_code, 200)
user_url = reverse(
'users.user_detail',
kwargs={
'username': self.existing_persona_username
},
locale=settings.WIKI_DEFAULT_LANGUAGE)
signout_url = urlparams(
reverse('account_logout',
locale=settings.WIKI_DEFAULT_LANGUAGE),
next=reverse('home',
locale=settings.WIKI_DEFAULT_LANGUAGE))
parsed = pq(response.content)
login_info = parsed.find('.oauth-logged-in')
ok_(len(login_info.children()))
signed_in_message = login_info.children()[0]
ok_('title' in signed_in_message.attrib)
eq_('Signed in with Persona',
signed_in_message.attrib['title'])
auth_links = login_info.children()[1].getchildren()
ok_(len(auth_links))
user_link = auth_links[0].getchildren()[0]
ok_('href' in user_link.attrib)
eq_(user_url, user_link.attrib['href'])
signout_link = auth_links[1].getchildren()[0]
ok_('href' in signout_link.attrib)
eq_(signout_url.replace('%2F', '/'), # urlparams() encodes slashes
signout_link.attrib['href'])
def test_persona_form_present(self):
"""
When not authenticated, the Persona authentication components,
with correct data attributes, are present in page contents,
and the 'next' parameter is filled in.
"""
all_docs_url = reverse('wiki.all_documents',
locale=settings.WIKI_DEFAULT_LANGUAGE)
response = self.client.get(all_docs_url, follow=True)
parsed = pq(response.content)
request_info = '{"siteName": "%(siteName)s", "siteLogo": "%(siteLogo)s"}' % \
settings.SOCIALACCOUNT_PROVIDERS['persona']['REQUEST_PARAMETERS']
stub_attrs = (
('data-csrf-token-url', reverse('persona_csrf_token')),
('data-request', request_info),
)
auth_attrs = (
('data-service', 'Persona'),
('data-next', all_docs_url),
)
stub_persona_form = parsed.find('#_persona_login')
ok_(len(stub_persona_form) > 0)
for stub_attr in stub_attrs:
ok_(stub_persona_form.attr(stub_attr[0]))
eq_(stub_attr[1], stub_persona_form.attr(stub_attr[0]))
auth_persona_form = parsed.find('.launch-persona-login')
ok_(len(auth_persona_form) > 0)
for auth_attr in auth_attrs:
ok_(auth_persona_form.attr(auth_attr[0]))
eq_(auth_attr[1], auth_persona_form.attr(auth_attr[0]))
@requests_mock.mock()
def test_persona_signup_copy(self, mock_requests):
"""
After a new user signs up with Persona, their username, an
indication that Persona was used to log in, and a logout link
appear in the auth tools section of the page.
"""
persona_signup_email = 'templates_persona_signup_copy@example.com'
persona_signup_username = 'templates_persona_signup_copy'
add_persona_verify_response(mock_requests, {
'status': 'okay',
'email': persona_signup_email,
})
self.client.post(reverse('persona_login'), follow=True)
data = {'website': '',
'username': persona_signup_username,
'email': persona_signup_email,
'terms': True}
response = self.client.post(
reverse('socialaccount_signup',
locale=settings.WIKI_DEFAULT_LANGUAGE),
data=data, follow=True)
user_url = reverse(
'users.user_detail',
kwargs={'username': persona_signup_username},
locale=settings.WIKI_DEFAULT_LANGUAGE)
signout_url = urlparams(
reverse('account_logout',
locale=settings.WIKI_DEFAULT_LANGUAGE),
next=reverse('home',
locale=settings.WIKI_DEFAULT_LANGUAGE))
parsed = pq(response.content)
login_info = parsed.find('.oauth-logged-in')
ok_(len(login_info.children()))
signed_in_message = login_info.children()[0]
ok_('title' in signed_in_message.attrib)
eq_('Signed in with Persona',
signed_in_message.attrib['title'])
auth_links = login_info.children()[1].getchildren()
ok_(len(auth_links))
user_link = auth_links[0].getchildren()[0]
ok_('href' in user_link.attrib)
eq_(user_url, user_link.attrib['href'])
signout_link = auth_links[1].getchildren()[0]
ok_('href' in signout_link.attrib)
eq_(signout_url.replace('%2F', '/'), # urlparams() encodes slashes
signout_link.attrib['href'])
@pytest.mark.bans
class BanTestCase(UserTestCase):
def test_common_reasons_in_template(self):
# The common reasons to ban users (from constance) should be in template
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username='admin', password='testpass')
ban_url = reverse('users.ban_user',
kwargs={'user_id': testuser.id})
resp = self.client.get(ban_url, follow=True)
eq_(200, resp.status_code)
page = pq(resp.content)
reasons_to_ban_found = page.find('.ban-common-reason')
reasons_to_ban_expected = json.loads(
constance_config.COMMON_REASONS_TO_BAN_USERS
)
eq_(len(reasons_to_ban_found), len(reasons_to_ban_expected))
for reason in reasons_to_ban_found:
ok_(reason.text in reasons_to_ban_expected)
@override_config(COMMON_REASONS_TO_BAN_USERS='Not valid JSON')
def test_common_reasons_error(self):
# If there is an error in getting the common reasons from constance,
# then 'Spam' should still show up in the template as the default
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username='admin', password='testpass')
ban_url = reverse('users.ban_user',
kwargs={'user_id': testuser.id})
resp = self.client.get(ban_url, follow=True)
eq_(200, resp.status_code)
page = pq(resp.content)
reasons_to_ban_found = page.find('.ban-common-reason')
reasons_to_ban_expected = ['Spam']
eq_(len(reasons_to_ban_found), len(reasons_to_ban_expected))
for reason in reasons_to_ban_found:
ok_(reason.text in reasons_to_ban_expected)
@override_config(COMMON_REASONS_TO_BAN_USERS='[]')
def test_common_reasons_empty(self):
# If the list of common reasons to ban users in constance is empty,
# then 'Spam' should still show up in the template as the default
testuser = self.user_model.objects.get(username='testuser')
self.client.login(username='admin', password='testpass')
ban_url = reverse('users.ban_user',
kwargs={'user_id': testuser.id})
resp = self.client.get(ban_url, follow=True)
eq_(200, resp.status_code)
page = pq(resp.content)
reasons_to_ban_found = page.find('.ban-common-reason')
reasons_to_ban_expected = ['Spam']
eq_(len(reasons_to_ban_found), len(reasons_to_ban_expected))
for reason in reasons_to_ban_found:
ok_(reason.text in reasons_to_ban_expected)
|
marcsans/cnn-physics-perception
|
refs/heads/master
|
phy/lib/python2.7/site-packages/matplotlib/dviread.py
|
4
|
"""
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
# (the descent is likewise reported in raw dvi units here)
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and chr(ch)
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
fontname = n[-l:].decode('ascii')
tfm = _tfmfile(fontname)
if tfm is None:
if six.PY2:
error_class = OSError
else:
error_class = FileNotFoundError
raise error_class("missing font metrics file: %s" % fontname)
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
vf = _vffile(fontname)
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
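# Illustrative sketch (assumed values, not part of the original module):
# _fix2comp reinterprets a 32-bit unsigned value as signed two's complement,
# e.g. _fix2comp(0xFFFFFFFF) == -1 while _fix2comp(0x7FFFFFFF) is unchanged.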
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
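# Illustrative sketch (assumed values, not part of the original module):
# with 20 fractional bits (as the >> 20 implies), 1.0 is stored as 1 << 20,
# so _mul2012(1 << 20, 1 << 20) == 1 << 20 and
# _mul2012(3 << 19, 2 << 20) == 3 << 20   # i.e. 1.5 * 2.0 == 3.0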
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
|
romanzenka/myrimatch
|
refs/heads/master
|
freicore/libraries/boost-build/tools/types/asm.py
|
3
|
# Copyright Craig Rodrigues 2005.
# Copyright (c) 2008 Steven Watanabe
#
# Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from b2.build import type
def register():
type.register_type('ASM', ['s', 'S', 'asm'])
register()
|
sharhar/USB-Thing
|
refs/heads/master
|
UpdaterFiles/Lib/python-3.5.1.amd64/Lib/plat-darwin/IN.py
|
109
|
# Generated by h2py from /usr/include/netinet/in.h
# Included from sys/appleapiopts.h
# Included from sys/_types.h
# Included from sys/cdefs.h
def __P(protos): return protos
def __STRING(x): return #x
def __P(protos): return ()
def __STRING(x): return "x"
def __attribute__(x): return
def __COPYRIGHT(s): return __IDSTRING(copyright,s)
def __RCSID(s): return __IDSTRING(rcsid,s)
def __SCCSID(s): return __IDSTRING(sccsid,s)
def __PROJECT_VERSION(s): return __IDSTRING(project_version,s)
__DARWIN_UNIX03 = 1
__DARWIN_UNIX03 = 0
__DARWIN_UNIX03 = 0
__DARWIN_UNIX03 = 1
__DARWIN_64_BIT_INO_T = 1
__DARWIN_64_BIT_INO_T = 0
__DARWIN_64_BIT_INO_T = 0
__DARWIN_NON_CANCELABLE = 0
__DARWIN_VERS_1050 = 1
__DARWIN_VERS_1050 = 0
__DARWIN_SUF_UNIX03 = "$UNIX2003"
__DARWIN_SUF_UNIX03_SET = 1
__DARWIN_SUF_UNIX03_SET = 0
__DARWIN_SUF_64_BIT_INO_T = "$INODE64"
__DARWIN_SUF_NON_CANCELABLE = "$NOCANCEL"
__DARWIN_SUF_1050 = "$1050"
__DARWIN_SUF_UNIX03_SET = 0
__DARWIN_SUF_EXTSN = "$DARWIN_EXTSN"
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 0
def __DARWIN_LDBL_COMPAT(x): return
def __DARWIN_LDBL_COMPAT2(x): return
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 1
def __DARWIN_LDBL_COMPAT(x): return
def __DARWIN_LDBL_COMPAT2(x): return
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 0
_DARWIN_FEATURE_LONG_DOUBLE_IS_DOUBLE = 1
_DARWIN_FEATURE_UNIX_CONFORMANCE = 3
_DARWIN_FEATURE_64_BIT_INODE = 1
# Included from machine/_types.h
__PTHREAD_SIZE__ = 1168
__PTHREAD_ATTR_SIZE__ = 56
__PTHREAD_MUTEXATTR_SIZE__ = 8
__PTHREAD_MUTEX_SIZE__ = 56
__PTHREAD_CONDATTR_SIZE__ = 8
__PTHREAD_COND_SIZE__ = 40
__PTHREAD_ONCE_SIZE__ = 8
__PTHREAD_RWLOCK_SIZE__ = 192
__PTHREAD_RWLOCKATTR_SIZE__ = 16
__PTHREAD_SIZE__ = 596
__PTHREAD_ATTR_SIZE__ = 36
__PTHREAD_MUTEXATTR_SIZE__ = 8
__PTHREAD_MUTEX_SIZE__ = 40
__PTHREAD_CONDATTR_SIZE__ = 4
__PTHREAD_COND_SIZE__ = 24
__PTHREAD_ONCE_SIZE__ = 4
__PTHREAD_RWLOCK_SIZE__ = 124
__PTHREAD_RWLOCKATTR_SIZE__ = 12
__DARWIN_NULL = 0
# Included from stdint.h
__WORDSIZE = 64
__WORDSIZE = 32
INT8_MAX = 127
INT16_MAX = 32767
INT32_MAX = 2147483647
INT8_MIN = -128
INT16_MIN = -32768
INT32_MIN = (-INT32_MAX-1)
UINT8_MAX = 255
UINT16_MAX = 65535
INT_LEAST8_MIN = INT8_MIN
INT_LEAST16_MIN = INT16_MIN
INT_LEAST32_MIN = INT32_MIN
INT_LEAST8_MAX = INT8_MAX
INT_LEAST16_MAX = INT16_MAX
INT_LEAST32_MAX = INT32_MAX
UINT_LEAST8_MAX = UINT8_MAX
UINT_LEAST16_MAX = UINT16_MAX
INT_FAST8_MIN = INT8_MIN
INT_FAST16_MIN = INT16_MIN
INT_FAST32_MIN = INT32_MIN
INT_FAST8_MAX = INT8_MAX
INT_FAST16_MAX = INT16_MAX
INT_FAST32_MAX = INT32_MAX
UINT_FAST8_MAX = UINT8_MAX
UINT_FAST16_MAX = UINT16_MAX
INTPTR_MIN = INT32_MIN
INTPTR_MAX = INT32_MAX
PTRDIFF_MIN = INT32_MIN
PTRDIFF_MAX = INT32_MAX
WCHAR_MAX = 0x7fffffff
WCHAR_MIN = 0
WCHAR_MIN = (-WCHAR_MAX-1)
WINT_MIN = INT32_MIN
WINT_MAX = INT32_MAX
SIG_ATOMIC_MIN = INT32_MIN
SIG_ATOMIC_MAX = INT32_MAX
def INT8_C(v): return (v)
def INT16_C(v): return (v)
def INT32_C(v): return (v)
# Included from sys/socket.h
# Included from machine/_param.h
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_LINGER = 0x1080
SO_OOBINLINE = 0x0100
SO_REUSEPORT = 0x0200
SO_TIMESTAMP = 0x0400
SO_ACCEPTFILTER = 0x1000
SO_DONTTRUNC = 0x2000
SO_WANTMORE = 0x4000
SO_WANTOOBFLAG = 0x8000
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_NREAD = 0x1020
SO_NKE = 0x1021
SO_NOSIGPIPE = 0x1022
SO_NOADDRERR = 0x1023
SO_NWRITE = 0x1024
SO_REUSESHAREUID = 0x1025
SO_NOTIFYCONFLICT = 0x1026
SO_LINGER_SEC = 0x1080
SO_RESTRICTIONS = 0x1081
SO_RESTRICT_DENYIN = 0x00000001
SO_RESTRICT_DENYOUT = 0x00000002
SO_RESTRICT_DENYSET = (-2147483648)
SO_LABEL = 0x1010
SO_PEERLABEL = 0x1011
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_UNIX = 1
AF_LOCAL = AF_UNIX
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_ROUTE = 17
AF_LINK = 18
pseudo_AF_XTP = 19
AF_COIP = 20
AF_CNT = 21
pseudo_AF_RTIP = 22
AF_IPX = 23
AF_SIP = 24
pseudo_AF_PIP = 25
AF_NDRV = 27
AF_ISDN = 28
AF_E164 = AF_ISDN
pseudo_AF_KEY = 29
AF_INET6 = 30
AF_NATM = 31
AF_SYSTEM = 32
AF_NETBIOS = 33
AF_PPP = 34
AF_ATM = 30
pseudo_AF_HDRCMPLT = 35
AF_RESERVED_36 = 36
AF_NETGRAPH = 32
AF_MAX = 37
SOCK_MAXADDRLEN = 255
_SS_MAXSIZE = 128
PF_UNSPEC = AF_UNSPEC
PF_LOCAL = AF_LOCAL
PF_UNIX = PF_LOCAL
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_ROUTE = AF_ROUTE
PF_LINK = AF_LINK
PF_XTP = pseudo_AF_XTP
PF_COIP = AF_COIP
PF_CNT = AF_CNT
PF_SIP = AF_SIP
PF_IPX = AF_IPX
PF_RTIP = pseudo_AF_RTIP
PF_PIP = pseudo_AF_PIP
PF_NDRV = AF_NDRV
PF_ISDN = AF_ISDN
PF_KEY = pseudo_AF_KEY
PF_INET6 = AF_INET6
PF_NATM = AF_NATM
PF_SYSTEM = AF_SYSTEM
PF_NETBIOS = AF_NETBIOS
PF_PPP = AF_PPP
PF_RESERVED_36 = AF_RESERVED_36
PF_ATM = AF_ATM
PF_NETGRAPH = AF_NETGRAPH
PF_MAX = AF_MAX
NET_MAXID = AF_MAX
NET_RT_DUMP = 1
NET_RT_FLAGS = 2
NET_RT_IFLIST = 3
NET_RT_STAT = 4
NET_RT_TRASH = 5
NET_RT_IFLIST2 = 6
NET_RT_DUMP2 = 7
NET_RT_MAXID = 8
SOMAXCONN = 128
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_WAITSTREAM = 0x200
MSG_FLUSH = 0x400
MSG_HOLD = 0x800
MSG_SEND = 0x1000
MSG_HAVEMORE = 0x2000
MSG_RCVMORE = 0x4000
MSG_NEEDSA = 0x10000
CMGROUP_MAX = 16
SCM_RIGHTS = 0x01
SCM_TIMESTAMP = 0x02
SCM_CREDS = 0x03
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
# Included from machine/endian.h
# Included from sys/_endian.h
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def NTOHL(x): return (x)
def NTOHS(x): return (x)
def HTONL(x): return (x)
def HTONS(x): return (x)
# Included from libkern/_OSByteOrder.h
def __DARWIN_OSSwapConstInt16(x): return \
def __DARWIN_OSSwapConstInt32(x): return \
def __DARWIN_OSSwapConstInt64(x): return \
# Included from libkern/i386/_OSByteOrder.h
def __DARWIN_OSSwapInt16(x): return \
def __DARWIN_OSSwapInt32(x): return \
def __DARWIN_OSSwapInt64(x): return \
def __DARWIN_OSSwapInt16(x): return _OSSwapInt16(x)
def __DARWIN_OSSwapInt32(x): return _OSSwapInt32(x)
def __DARWIN_OSSwapInt64(x): return _OSSwapInt64(x)
def ntohs(x): return __DARWIN_OSSwapInt16(x)
def htons(x): return __DARWIN_OSSwapInt16(x)
def ntohl(x): return __DARWIN_OSSwapInt32(x)
def htonl(x): return __DARWIN_OSSwapInt32(x)
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
__DARWIN_IPPORT_RESERVED = 1024
IPPORT_RESERVED = __DARWIN_IPPORT_RESERVED
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
def IN_CLASSA(i): return (((u_int32_t)(i) & (-2147483648)) == 0)
IN_CLASSA_NET = (-16777216)
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & (-1073741824)) == (-2147483648))
IN_CLASSB_NET = (-65536)
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & (-536870912)) == (-1073741824))
IN_CLASSC_NET = (-256)
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & (-268435456)) == (-536870912))
IN_CLASSD_NET = (-268435456)
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
def IN_BADCLASS(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
INADDR_NONE = (-1)
def IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_STRIPHDR = 23
IP_RECVTTL = 24
IP_FW_ADD = 40
IP_FW_DEL = 41
IP_FW_FLUSH = 42
IP_FW_ZERO = 43
IP_FW_GET = 44
IP_FW_RESETLOG = 45
IP_OLD_FW_ADD = 50
IP_OLD_FW_DEL = 51
IP_OLD_FW_FLUSH = 52
IP_OLD_FW_ZERO = 53
IP_OLD_FW_GET = 54
IP_NAT__XXX = 55
IP_OLD_FW_RESETLOG = 56
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_TRAFFIC_MGT_BACKGROUND = 65
IP_FORCE_OUT_IFP = 69
TRAFFIC_MGT_SO_BACKGROUND = 0x0001
TRAFFIC_MGT_SO_BG_SUPPRESSED = 0x0002
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
__KAME_VERSION = "20010528/apple-darwin"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_MAXFRAGS = 41
IPV6CTL_MAXID = 42
|
CDE-UNIBE/qcat
|
refs/heads/develop
|
apps/api/migrations/0004_apptoken.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-04-29 07:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0003_auto_20170124_0858'),
]
operations = [
migrations.CreateModel(
name='AppToken',
fields=[
('key', models.CharField(max_length=40, primary_key=True, serialize=False, verbose_name='Key')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now_add=True, verbose_name='Updated')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='app_token', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Token',
'verbose_name_plural': 'Tokens',
'abstract': False,
},
),
]
|
digimarc/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/migrations/0004_a4.py
|
381
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lookuperror_a', '0003_a3'),
]
operations = [
migrations.CreateModel(
name='A4',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
],
),
]
|
libvirt/autotest
|
refs/heads/master
|
client/tests/kvm/tests/qmp_basic.py
|
1
|
from autotest_lib.client.common_lib import error
from autotest_lib.client.virt import kvm_monitor
def run_qmp_basic(test, params, env):
"""
QMP Specification test-suite: this checks if the *basic* protocol conforms
to its specification, which is the file QMP/qmp-spec.txt in QEMU's source tree.
IMPORTANT NOTES:
o Most tests depend heavily on QMP's error information (eg. classes),
this might have bad implications as the error interface is going to
change in QMP
o Command testing is *not* covered in this suite. Each command has its
own specification and should be tested separately
o We use the same terminology as used by the QMP specification,
especially with regard to JSON types (e.g. a Python dict is called
a json-object)
o This is divided into sub test-suites; please check the bottom of this
file to check the order in which they are run
TODO:
o Finding which test failed is not as easy as it should be
o Are all those check_*() functions really needed? Wouldn't a
specialized class (eg. a Response class) do better?
"""
def fail_no_key(qmp_dict, key):
if not isinstance(qmp_dict, dict):
raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
type(qmp_dict))
if not key in qmp_dict:
raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
(key, str(qmp_dict)))
def check_dict_key(qmp_dict, key, keytype):
"""
Performs the following checks on a QMP dict key:
1. qmp_dict is a dict
2. key exists in qmp_dict
3. key is of type keytype
If any of these checks fails, error.TestFail is raised.
"""
fail_no_key(qmp_dict, key)
if not isinstance(qmp_dict[key], keytype):
raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
(key, keytype, type(qmp_dict[key])))
def check_key_is_dict(qmp_dict, key):
check_dict_key(qmp_dict, key, dict)
def check_key_is_list(qmp_dict, key):
check_dict_key(qmp_dict, key, list)
def check_key_is_str(qmp_dict, key):
check_dict_key(qmp_dict, key, unicode)
def check_str_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, unicode)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_key_is_int(qmp_dict, key):
fail_no_key(qmp_dict, key)
try:
value = int(qmp_dict[key])
except Exception:
raise error.TestFail("'%s' key is not of type int, it's '%s'" %
(key, type(qmp_dict[key])))
def check_bool_key(qmp_dict, keyname, value=None):
check_dict_key(qmp_dict, keyname, bool)
if value and value != qmp_dict[keyname]:
raise error.TestFail("'%s' key value '%s' should be '%s'" %
(keyname, str(qmp_dict[keyname]), str(value)))
def check_success_resp(resp, empty=False):
"""
Check QMP OK response.
@param resp: QMP response
@param empty: if True, response should not contain data to return
"""
check_key_is_dict(resp, "return")
if empty and len(resp["return"]) > 0:
raise error.TestFail("success response is not empty ('%s')" %
str(resp))
def check_error_resp(resp, classname=None, datadict=None):
"""
Check QMP error response.
@param resp: QMP response
@param classname: Expected error class name
@param datadict: Expected error data dictionary
"""
check_key_is_dict(resp, "error")
check_key_is_str(resp["error"], "class")
if classname and resp["error"]["class"] != classname:
raise error.TestFail("got error class '%s' expected '%s'" %
(resp["error"]["class"], classname))
check_key_is_dict(resp["error"], "data")
if datadict and resp["error"]["data"] != datadict:
raise error.TestFail("got data dict '%s' expected '%s'" %
(resp["error"]["data"], datadict))
def test_version(version):
"""
Check the QMP greeting message version key which, according to QMP's
documentation, should be:
{ "qemu": { "major": json-int, "minor": json-int, "micro": json-int }
"package": json-string }
"""
check_key_is_dict(version, "qemu")
for key in [ "major", "minor", "micro" ]:
check_key_is_int(version["qemu"], key)
check_key_is_str(version, "package")
def test_greeting(greeting):
check_key_is_dict(greeting, "QMP")
check_key_is_dict(greeting["QMP"], "version")
check_key_is_list(greeting["QMP"], "capabilities")
def greeting_suite(monitor):
"""
Check the greeting message format, as described in the QMP
specification section '2.2 Server Greeting'.
{ "QMP": { "version": json-object, "capabilities": json-array } }
"""
greeting = monitor.get_greeting()
test_greeting(greeting)
test_version(greeting["QMP"]["version"])
def json_parsing_errors_suite(monitor):
"""
Check that QMP's parser is able to recover from parsing errors; please
check the JSON spec for more info on the JSON syntax (RFC 4627).
"""
# We're quite simple right now and the focus is on parsing errors that
# have already bitten us in the past.
#
# TODO: The following test-cases are missing:
#
# - JSON numbers, strings and arrays
# - More invalid characters or malformed structures
# - Valid, but not obvious syntax, like zillion of spaces or
# strings with unicode chars (different suite maybe?)
bad_json = []
# A JSON value MUST be an object, array, number, string, true, false,
# or null
#
# NOTE: QMP seems to ignore a number of chars, like: | and ?
bad_json.append(":")
bad_json.append(",")
# Malformed json-objects
#
# NOTE: sending only "}" seems to break QMP
# NOTE: Duplicate keys are accepted (should they be?)
bad_json.append("{ \"execute\" }")
bad_json.append("{ \"execute\": \"query-version\", }")
bad_json.append("{ 1: \"query-version\" }")
bad_json.append("{ true: \"query-version\" }")
bad_json.append("{ []: \"query-version\" }")
bad_json.append("{ {}: \"query-version\" }")
for cmd in bad_json:
resp = monitor.cmd_raw(cmd)
check_error_resp(resp, "JSONParsing")
def test_id_key(monitor):
"""
Check that QMP's "id" key is correctly handled.
"""
# The "id" key must be echoed back in error responses
id_key = "kvm-autotest"
resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key)
check_error_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key must be echoed back in success responses
resp = monitor.cmd_qmp("query-status", id=id_key)
check_success_resp(resp)
check_str_key(resp, "id", id_key)
# The "id" key can be any json-object
for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
{ "key": {} } ]:
resp = monitor.cmd_qmp("query-status", id=id_key)
check_success_resp(resp)
if resp["id"] != id_key:
raise error.TestFail("expected id '%s' but got '%s'" %
(str(id_key), str(resp["id"])))
def test_invalid_arg_key(monitor):
"""
Currently, the only supported keys in the input object are: "execute",
"arguments" and "id". Although expansion is supported, invalid key
names must be detected.
"""
resp = monitor.cmd_obj({ "execute": "eject", "foobar": True })
check_error_resp(resp, "QMPExtraInputObjectMember",
{ "member": "foobar" })
def test_bad_arguments_key_type(monitor):
"""
The "arguments" key must be an json-object.
We use the eject command to perform the tests, but that's a random
choice; any command that accepts arguments will do, as the command
doesn't get called.
"""
for item in [ True, [], 1, "foo" ]:
resp = monitor.cmd_obj({ "execute": "eject", "arguments": item })
check_error_resp(resp, "QMPBadInputObjectMember",
{ "member": "arguments", "expected": "object" })
def test_bad_execute_key_type(monitor):
"""
The "execute" key must be a json-string.
"""
for item in [ False, 1, {}, [] ]:
resp = monitor.cmd_obj({ "execute": item })
check_error_resp(resp, "QMPBadInputObjectMember",
{ "member": "execute", "expected": "string" })
def test_no_execute_key(monitor):
"""
The "execute" key must exist, we also test for some stupid parsing
errors.
"""
for cmd in [ {}, { "execut": "qmp_capabilities" },
{ "executee": "qmp_capabilities" }, { "foo": "bar" }]:
resp = monitor.cmd_obj(cmd)
check_error_resp(resp) # XXX: check class and data dict?
def test_bad_input_obj_type(monitor):
"""
The input object must be... a json-object.
"""
for cmd in [ "foo", [], True, 1 ]:
resp = monitor.cmd_obj(cmd)
check_error_resp(resp, "QMPBadInputObject", { "expected":"object" })
def test_good_input_obj(monitor):
"""
Basic success tests for issuing QMP commands.
"""
# NOTE: We don't use the cmd_qmp() method here because the command
# object is in a 'random' order
resp = monitor.cmd_obj({ "execute": "query-version" })
check_success_resp(resp)
resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" })
check_success_resp(resp)
id = "1234foo"
resp = monitor.cmd_obj({ "id": id, "execute": "query-version",
"arguments": {} })
check_success_resp(resp)
check_str_key(resp, "id", id)
# TODO: would be good to test simple argument usage, but we don't have
# a read-only command that accepts arguments.
def input_object_suite(monitor):
"""
Check the input object format, as described in the QMP specification
section '2.3 Issuing Commands'.
{ "execute": json-string, "arguments": json-object, "id": json-value }
"""
test_good_input_obj(monitor)
test_bad_input_obj_type(monitor)
test_no_execute_key(monitor)
test_bad_execute_key_type(monitor)
test_bad_arguments_key_type(monitor)
test_id_key(monitor)
test_invalid_arg_key(monitor)
def argument_checker_suite(monitor):
"""
Check that QMP's argument checker is detecting all possible errors.
We use a number of different commands to perform the checks, but the
command used doesn't matter much as QMP performs argument checking
_before_ calling the command.
"""
# stop doesn't take arguments
resp = monitor.cmd_qmp("stop", { "foo": 1 })
check_error_resp(resp, "InvalidParameter", { "name": "foo" })
# required argument omitted
resp = monitor.cmd_qmp("screendump")
check_error_resp(resp, "MissingParameter", { "name": "filename" })
# 'bar' is not a valid argument
resp = monitor.cmd_qmp("screendump", { "filename": "outfile",
"bar": "bar" })
check_error_resp(resp, "InvalidParameter", { "name": "bar"})
# test optional argument: 'force' is omitted, but it's optional, so
# the handler has to be called. Test this happens by checking an
# error that is generated by the handler itself.
resp = monitor.cmd_qmp("eject", { "device": "foobar" })
check_error_resp(resp, "DeviceNotFound")
# filename argument must be a json-string
for arg in [ {}, [], 1, True ]:
resp = monitor.cmd_qmp("screendump", { "filename": arg })
check_error_resp(resp, "InvalidParameterType",
{ "name": "filename", "expected": "string" })
# force argument must be a json-bool
for arg in [ {}, [], 1, "foo" ]:
resp = monitor.cmd_qmp("eject", { "force": arg, "device": "foo" })
check_error_resp(resp, "InvalidParameterType",
{ "name": "force", "expected": "bool" })
# val argument must be a json-int
for arg in [ {}, [], True, "foo" ]:
resp = monitor.cmd_qmp("memsave", { "val": arg, "filename": "foo",
"size": 10 })
check_error_resp(resp, "InvalidParameterType",
{ "name": "val", "expected": "int" })
# value argument must be a json-number
for arg in [ {}, [], True, "foo" ]:
resp = monitor.cmd_qmp("migrate_set_speed", { "value": arg })
check_error_resp(resp, "InvalidParameterType",
{ "name": "value", "expected": "number" })
# qdev-type commands have their own argument checker, all QMP does
# is to skip its checking and pass arguments through. Check this
# works by providing invalid options to device_add and expecting
# an error message from qdev
resp = monitor.cmd_qmp("device_add", { "driver": "e1000",
"foo": "bar" })
check_error_resp(resp, "PropertyNotFound",
{"device": "e1000", "property": "foo"})
def unknown_commands_suite(monitor):
"""
Check that QMP handles unknown commands correctly.
"""
# We also call a HMP-only command, to be sure it will fail as expected
for cmd in [ "bar", "query-", "query-foo", "q", "help" ]:
resp = monitor.cmd_qmp(cmd)
check_error_resp(resp, "CommandNotFound", { "name": cmd })
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
# Look for the first qmp monitor available, otherwise, fail the test
qmp_monitor = None
for m in vm.monitors:
if isinstance(m, kvm_monitor.QMPMonitor):
qmp_monitor = m
if qmp_monitor is None:
raise error.TestError('Could not find a QMP monitor, aborting test')
# Run all suites
greeting_suite(qmp_monitor)
input_object_suite(qmp_monitor)
argument_checker_suite(qmp_monitor)
unknown_commands_suite(qmp_monitor)
json_parsing_errors_suite(qmp_monitor)
# check if QMP is still alive
if not qmp_monitor.is_responsive():
raise error.TestFail('QMP monitor is not responsive after testing')
|
mojeto/django
|
refs/heads/master
|
tests/test_client/__init__.py
|
12133432
| |
redhat-openstack/sahara
|
refs/heads/master-patches
|
sahara/utils/hacking/__init__.py
|
12133432
| |
Maccimo/intellij-community
|
refs/heads/master
|
python/helpers/tests/generator3_tests/data/SkeletonGeneration/inaccessible_class_attribute_py3/before/sdk_skeletons/existing.py
|
12133432
| |
johankaito/fufuka
|
refs/heads/master
|
microblog/flask/venv/lib/python2.7/site-packages/scipy/fftpack/tests/gen_fftw_ref.py
|
73
|
from __future__ import division, print_function, absolute_import
from subprocess import Popen, PIPE, STDOUT
import numpy as np
SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]
def gen_data(dt):
arrays = {}
if dt == np.double:
pg = './fftw_double'
elif dt == np.float32:
pg = './fftw_single'
else:
raise ValueError("unknown: %s" % dt)
# Generate test data using FFTW for reference
for type in [1, 2, 3, 4, 5, 6, 7, 8]:
arrays[type] = {}
for sz in SZ:
a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)
st = [i.strip() for i in a.stdout.readlines()]
arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt)
return arrays
# generate single precision data
data = gen_data(np.float32)
filename = 'fftw_single_ref'
# Save ref data into npz format
d = {}
d['sizes'] = SZ
for type in [1, 2, 3, 4]:
for sz in SZ:
d['dct_%d_%d' % (type, sz)] = data[type][sz]
d['sizes'] = SZ
for type in [5, 6, 7, 8]:
for sz in SZ:
d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)
# generate double precision data
data = gen_data(np.float64)
filename = 'fftw_double_ref'
# Save ref data into npz format
d = {}
d['sizes'] = SZ
for type in [1, 2, 3, 4]:
for sz in SZ:
d['dct_%d_%d' % (type, sz)] = data[type][sz]
d['sizes'] = SZ
for type in [5, 6, 7, 8]:
for sz in SZ:
d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)
|
KanchanChauhan/erpnext
|
refs/heads/develop
|
erpnext/setup/doctype/global_defaults/global_defaults.py
|
38
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
"""Global Defaults"""
import frappe
import frappe.defaults
from frappe.utils import cint
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
keydict = {
# "key in defaults": "key in Global Defaults"
"fiscal_year": "current_fiscal_year",
'company': 'default_company',
'currency': 'default_currency',
"country": "country",
'hide_currency_symbol':'hide_currency_symbol',
'account_url':'account_url',
'disable_rounded_total': 'disable_rounded_total',
'disable_in_words': 'disable_in_words',
}
from frappe.model.document import Document
class GlobalDefaults(Document):
def on_update(self):
"""update defaults"""
for key in keydict:
frappe.db.set_default(key, self.get(keydict[key], ''))
# update year start date and year end date from fiscal_year
year_start_end_date = frappe.db.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", self.current_fiscal_year)
if year_start_end_date:
ysd = year_start_end_date[0][0] or ''
yed = year_start_end_date[0][1] or ''
if ysd and yed:
frappe.db.set_default('year_start_date', ysd.strftime('%Y-%m-%d'))
frappe.db.set_default('year_end_date', yed.strftime('%Y-%m-%d'))
# enable default currency
if self.default_currency:
frappe.db.set_value("Currency", self.default_currency, "enabled", 1)
self.toggle_rounded_total()
self.toggle_in_words()
# clear cache
frappe.clear_cache()
def get_defaults(self):
return frappe.defaults.get_defaults()
def toggle_rounded_total(self):
self.disable_rounded_total = cint(self.disable_rounded_total)
# Make property setters to hide rounded total fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note"):
make_property_setter(doctype, "base_rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "base_rounded_total", "print_hide", 1, "Check")
make_property_setter(doctype, "rounded_total", "hidden", self.disable_rounded_total, "Check")
make_property_setter(doctype, "rounded_total", "print_hide", self.disable_rounded_total, "Check")
def toggle_in_words(self):
self.disable_in_words = cint(self.disable_in_words)
# Make property setters to hide in words fields
for doctype in ("Quotation", "Sales Order", "Sales Invoice", "Delivery Note",
"Supplier Quotation", "Purchase Order", "Purchase Invoice", "Purchase Receipt"):
make_property_setter(doctype, "in_words", "hidden", self.disable_in_words, "Check")
make_property_setter(doctype, "in_words", "print_hide", self.disable_in_words, "Check")
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/python-openid/openid/test/test_examples.py
|
87
|
"Test some examples."
import socket
import os.path, unittest, sys, time
from cStringIO import StringIO
import twill.commands, twill.parse, twill.unit
from openid.consumer.discover import \
OpenIDServiceEndpoint, OPENID_1_1_TYPE
from openid.consumer.consumer import AuthRequest
class TwillTest(twill.unit.TestInfo):
"""Variant of twill.unit.TestInfo that runs a function as a test script,
not twill script from a file.
"""
# twill.unit is pretty small to start with; we're overriding
# run_script and bypassing twill.parse, so it may make sense to
# rewrite twill.unit altogether.
# Desirable features:
# * better unittest.TestCase integration.
# - handle logs on setup and teardown.
# - treat TwillAssertionError as failed test assertion, make twill
# assertions more consistent with TestCase.failUnless idioms.
# - better error reporting on failed assertions.
# - The amount of functions passed back and forth between TestInfo
# and TestCase is currently pretty silly.
# * access to child process's logs.
# TestInfo.start_server redirects stdout/stderr to StringIO
# objects which are, afaict, inaccessible to the caller of
# test.unit.run_child_process.
# * notice when the child process dies, i.e. if you muck up and
# your runExampleServer function throws an exception.
def run_script(self):
time.sleep(self.sleep)
# twill.commands.go(self.get_url())
self.script(self)
def splitDir(d, count):
# in python2.4 and above, it's easier to spell this as
# d.rsplit(os.sep, count)
for i in xrange(count):
d = os.path.dirname(d)
return d
def runExampleServer(host, port, data_path):
thisfile = os.path.abspath(sys.modules[__name__].__file__)
topDir = splitDir(thisfile, 3)
exampleDir = os.path.join(topDir, 'examples')
serverExample = os.path.join(exampleDir, 'server.py')
serverModule = {}
execfile(serverExample, serverModule)
serverMain = serverModule['main']
serverMain(host, port, data_path)
class TestServer(unittest.TestCase):
"""Acceptance tests for examples/server.py.
These are more acceptance tests than unit tests as they actually
start the whole server running and test it on its external HTTP
interface.
"""
def setUp(self):
self.twillOutput = StringIO()
self.twillErr = StringIO()
twill.set_output(self.twillOutput)
twill.set_errout(self.twillErr)
# FIXME: make sure we pick an available port.
self.server_port = 8080
# We need something to feed the server as a realm, but it needn't
# be reachable. (Until we test realm verification.)
self.realm = 'http://127.0.0.1/%s' % (self.id(),)
self.return_to = self.realm + '/return_to'
twill.commands.reset_browser()
def runExampleServer(self):
"""Zero-arg run-the-server function to be passed to TestInfo."""
# FIXME - make sure sstore starts clean.
runExampleServer('127.0.0.1', self.server_port, 'sstore')
def v1endpoint(self, port):
"""Return an OpenID 1.1 OpenIDServiceEndpoint for the server."""
base = "http://%s:%s" % (socket.getfqdn('127.0.0.1'), port)
ep = OpenIDServiceEndpoint()
ep.claimed_id = base + "/id/bob"
ep.server_url = base + "/openidserver"
ep.type_uris = [OPENID_1_1_TYPE]
return ep
# TODO: test discovery
def test_checkidv1(self):
"""OpenID 1.1 checkid_setup request."""
ti = TwillTest(self.twill_checkidv1, self.runExampleServer,
self.server_port, sleep=0.2)
twill.unit.run_test(ti)
if self.twillErr.getvalue():
self.fail(self.twillErr.getvalue())
def test_allowed(self):
"""OpenID 1.1 checkid_setup request."""
ti = TwillTest(self.twill_allowed, self.runExampleServer,
self.server_port, sleep=0.2)
twill.unit.run_test(ti)
if self.twillErr.getvalue():
self.fail(self.twillErr.getvalue())
def twill_checkidv1(self, twillInfo):
endpoint = self.v1endpoint(self.server_port)
authreq = AuthRequest(endpoint, assoc=None)
url = authreq.redirectURL(self.realm, self.return_to)
c = twill.commands
try:
c.go(url)
c.get_browser()._browser.set_handle_redirect(False)
c.submit("yes")
c.code(302)
headers = c.get_browser()._browser.response().info()
finalURL = headers['Location']
self.failUnless('openid.mode=id_res' in finalURL, finalURL)
self.failUnless('openid.identity=' in finalURL, finalURL)
except twill.commands.TwillAssertionError, e:
msg = '%s\nFinal page:\n%s' % (
str(e), c.get_browser().get_html())
self.fail(msg)
def twill_allowed(self, twillInfo):
endpoint = self.v1endpoint(self.server_port)
authreq = AuthRequest(endpoint, assoc=None)
url = authreq.redirectURL(self.realm, self.return_to)
c = twill.commands
try:
c.go(url)
c.code(200)
c.get_browser()._browser.set_handle_redirect(False)
c.formvalue(1, 'remember', 'true')
c.find('name="login_as" value="bob"')
c.submit("yes")
c.code(302)
# Since we set remember=yes, the second time we shouldn't
# see that page.
c.go(url)
c.code(302)
headers = c.get_browser()._browser.response().info()
finalURL = headers['Location']
self.failUnless(finalURL.startswith(self.return_to))
except twill.commands.TwillAssertionError, e:
from traceback import format_exc
msg = '%s\nTwill output:%s\nTwill errors:%s\nFinal page:\n%s' % (
format_exc(),
self.twillOutput.getvalue(),
self.twillErr.getvalue(),
c.get_browser().get_html())
self.fail(msg)
def tearDown(self):
twill.set_output(None)
twill.set_errout(None)
if __name__ == '__main__':
unittest.main()
|
ros-infrastructure/ros_buildfarm
|
refs/heads/master
|
scripts/status/generate_blocked_releases_page_job.py
|
2
|
#!/usr/bin/env python3
import argparse
import copy
import sys
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.common import get_release_job_prefix
from ros_buildfarm.common import \
get_repositories_and_script_generating_key_files
from ros_buildfarm.config import get_index
from ros_buildfarm.git import get_repository
from ros_buildfarm.jenkins import configure_job
from ros_buildfarm.jenkins import configure_management_view
from ros_buildfarm.jenkins import connect
from ros_buildfarm.templates import expand_template
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Generate the 'blocked_releases_page' job on Jenkins")
add_argument_config_url(parser)
add_argument_rosdistro_name(parser)
add_argument_dry_run(parser)
args = parser.parse_args(argv)
config = get_index(args.config_url)
job_config = get_job_config(args, config)
jenkins = connect(config.jenkins_url)
configure_management_view(jenkins, dry_run=args.dry_run)
prefix = get_release_job_prefix(args.rosdistro_name)
job_name = '%s_blocked-releases-page' % prefix
configure_job(jenkins, job_name, job_config, dry_run=args.dry_run)
def get_job_config(args, config):
template_name = 'status/blocked_releases_page_job.xml.em'
repository_args, script_generating_key_files = \
get_repositories_and_script_generating_key_files(config=config)
job_data = copy.deepcopy(args.__dict__)
job_data.update({
'ros_buildfarm_repository': get_repository(),
'script_generating_key_files': script_generating_key_files,
'rosdistro_index_url': config.rosdistro_index_url,
'repository_args': repository_args,
'notification_emails':
config.distributions[args.rosdistro_name]['notification_emails'],
})
job_config = expand_template(template_name, job_data)
return job_config
if __name__ == '__main__':
main()
|
abaditsegay/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/command/build_ext.py
|
17
|
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_ext.py 69317 2009-02-05 22:55:00Z tarek.ziade $"
import sys, os, string, re
from types import *
from site import USER_BASE, USER_SITE
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
if os.name == 'nt':
from distutils.msvccompiler import get_build_version
MSVC_VERSION = int(get_build_version())
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext (Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
('user', None,
"add user include, library and rpath"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
def finalize_options (self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs, os.pathsep)
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.append(py_include)
if plat_py_include != py_include:
self.include_dirs.append(plat_py_include)
if type(self.libraries) is StringType:
self.libraries = [self.libraries]
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif type(self.library_dirs) is StringType:
self.library_dirs = string.split(self.library_dirs, os.pathsep)
if self.rpath is None:
self.rpath = []
elif type(self.rpath) is StringType:
self.rpath = string.split(self.rpath, os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
if MSVC_VERSION == 9:
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = ''
else:
# win-amd64 or win-ia64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
self.library_dirs.append(new_lib)
elif MSVC_VERSION == 8:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS8.0'))
elif MSVC_VERSION == 7:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VS7.1'))
else:
self.library_dirs.append(os.path.join(sys.exec_prefix,
'PC', 'VC6'))
# OS/2 (EMX) doesn't support Debug vs Release builds, but has the
# import libraries in its "Config" subdirectory
if os.name == 'os2':
self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
# for extensions under Cygwin and AtheOS Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# for extensions under Linux or Solaris with a shared Python library,
# Python's library directory must be appended to library_dirs
sysconfig.get_config_var('Py_ENABLE_SHARED')
if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu')
or sys.platform.startswith('sunos'))
and sysconfig.get_config_var('Py_ENABLE_SHARED')):
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = string.split(self.define, ',')
self.define = map(lambda symbol: (symbol, '1'), defines)
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = string.split(self.undef, ',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
# finalize_options ()
def run (self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
# run ()
def check_extensions_list (self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if type(extensions) is not ListType:
raise DistutilsSetupError, \
"'ext_modules' option must be a list of Extension instances"
for i in range(len(extensions)):
ext = extensions[i]
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
(ext_name, build_info) = ext
log.warn(("old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s'"
"-- please convert to Extension instance" % ext_name))
if type(ext) is not TupleType and len(ext) != 2:
raise DistutilsSetupError, \
("each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple")
if not (type(ext_name) is StringType and
extension_name_re.match(ext_name)):
raise DistutilsSetupError, \
("first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if type(build_info) is not DictionaryType:
raise DistutilsSetupError, \
("second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs',
'library_dirs',
'libraries',
'extra_objects',
'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (type(macro) is TupleType and
1 <= len(macro) <= 2):
raise DistutilsSetupError, \
("'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
# for extensions
# check_extensions_list ()
def get_source_files (self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs (self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
# get_outputs ()
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
sources = ext.sources
if sources is None or type(sources) not in (ListType, TupleType):
raise DistutilsSetupError, \
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name
sources = list(sources)
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
# ignore build-lib -- put the compiled extension into
# the source tree along with pure Python modules
modpath = string.split(fullname, '.')
package = string.join(modpath[0:-1], '.')
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_filename,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources (self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
# swig_sources ()
def find_swig (self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
elif os.name == "os2":
# assume swig available in the PATH.
return "swig.exe"
else:
raise DistutilsPlatformError, \
("I don't know how to find (much less run) SWIG "
"on platform '%s'") % os.name
# find_swig ()
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullname (self, ext_name):
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename (self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = string.split(ext_name, '.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return apply(os.path.join, ext_path) + '_d' + so_ext
return os.path.join(*ext_path) + so_ext
def get_export_symbols (self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "init" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "init" function.
"""
initfunc_name = "init" + string.split(ext.name,'.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
def get_libraries (self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows and OS/2, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
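# For example, a debug build of Python 2.5 would yield "python25_d"
# here (sketch; the actual name depends on sys.hexversion and
# self.debug).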
if sys.platform == "win32":
from distutils.msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
return ext.libraries
elif sys.platform == "os2emx":
# EMX/GCC requires the python library explicitly, and I
# believe VACPP does as well (though not confirmed) - AIM Apr01
template = "python%d%d"
# debug versions of the main DLL aren't supported, at least
# not at this time - AIM Apr01
#if self.debug:
# template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "cygwin":
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "atheos":
from distutils import sysconfig
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# Get SHLIBS from Makefile
extra = []
for lib in sysconfig.get_config_var('SHLIBS').split():
if lib.startswith('-l'):
extra.append(lib[2:])
else:
extra.append(lib)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
elif sys.platform == 'darwin':
# Don't use the default code below
return ext.libraries
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
return ext.libraries + [pythonlib]
else:
return ext.libraries
# class build_ext
|
SrNetoChan/Quantum-GIS
|
refs/heads/master
|
python/gui/auto_additions/qgscolorwidgets.py
|
56
|
# The following has been generated automatically from src/gui/qgscolorwidgets.h
QgsColorTextWidget.ColorTextFormat.baseClass = QgsColorTextWidget
|
goodfeli/pylearn2
|
refs/heads/master
|
pylearn2/datasets/svhn.py
|
16
|
"""
.. todo::
WRITEME
"""
import os
import gc
import warnings
try:
import tables
except ImportError:
warnings.warn("Couldn't import tables, so far SVHN is "
"only supported with PyTables")
import numpy
from theano.compat.six.moves import xrange
from theano import config
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.rng import make_np_rng
class SVHN(dense_design_matrix.DenseDesignMatrixPyTables):
"""
For faster access there is a copy of the hdf5 file in PYLEARN2_DATA_PATH,
but it is meant to be read-only. If you wish to modify the data, you
should pass a local copy to the path argument.
Parameters
----------
which_set : WRITEME
path : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
data_path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
def __init__(self, which_set, path=None, center=False, scale=False,
start=None, stop=None, axes=('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
if path is None:
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
mode = 'r'
else:
mode = 'r+'
warnings.warn("Because path is not same as PYLEARN2_DATA_PATH "
"be aware that data might have been "
"modified or pre-processed.")
if mode == 'r' and (scale or
center or
(start is not None) or
(stop is not None)):
raise ValueError("Only for speed there is a copy of hdf5 file in "
"PYLEARN2_DATA_PATH but it meant to be only "
"readable. If you wish to modify the data, you "
"should pass a local copy to the path argument.")
# load data
path = preprocess(path)
file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
if os.path.isfile(file_n):
make_new = False
else:
make_new = True
warnings.warn("Over riding existing file: {0}".format(file_n))
# if hdf5 file does not exist make them
if make_new:
self.filters = tables.Filters(complib='blosc', complevel=5)
self.make_data(which_set, path)
self.h5file = tables.openFile(file_n, mode=mode)
data = self.h5file.getNode('/', "Data")
if start is not None or stop is not None:
self.h5file, data = self.resize(self.h5file, start, stop)
# rescale or center if permitted
if center and scale:
data.X[:] -= 127.5
data.X[:] /= 127.5
elif center:
data.X[:] -= 127.5
elif scale:
data.X[:] /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN, self).__init__(X=data.X, y=data.y,
y_labels=numpy.max(data.y) + 1,
view_converter=view_converter)
if preprocessor:
can_fit = which_set in ['train', 'train_all', 'splitted_train']
preprocessor.apply(self, can_fit)
self.h5file.flush()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN(which_set='test', path=self.path,
center=self.center, scale=self.scale,
start=self.start, stop=self.stop,
axes=self.axes, preprocessor=self.preprocessor)
def make_data(self, which_set, path, shuffle=True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}
image_size = 32 * 32 * 3
h_file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
h5file, node = self.init_hdf5(h_file_n,
([sizes[which_set], image_size],
[sizes[which_set], 10]))
# For consistency between experiments it is better to make a new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x):
"""reshape data_x to deisng matrix view
"""
data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
return data_x
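# e.g. the (32, 32, 3, N) arrays loaded from the SVHN .mat files
# become an (N, 3072) design matrix after the transpose and reshape.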
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x), data_y
def split_train_valid(path, num_valid_train=400,
num_valid_extra=200):
"""
Extract number of class balanced samples from train and extra
sets for validation, and regard the remaining as new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]),
axis=3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]),
axis=3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x), train_y,\
design_matrix_view(valid_x), valid_y
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
SVHN.fill_hdf5(h5file, data_x, data_y, node)
h5file.close()
class SVHN_On_Memory(dense_design_matrix.DenseDesignMatrix):
"""
A version of SVHN dataset that loads everything into the memory instead of
using pytables.
Parameters
----------
which_set : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
def __init__(self, which_set, center=False, scale=False,
start=None, stop=None, axes=('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
# load data
path = preprocess(path)
data_x, data_y = self.make_data(which_set, path)
# rescale or center if permitted
if center and scale:
data_x -= 127.5
data_x /= 127.5
elif center:
data_x -= 127.5
elif scale:
data_x /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN_On_Memory, self).__init__(X=data_x, y=data_y, y_labels=10,
view_converter=view_converter)
if preprocessor:
if which_set in ['train', 'train_all', 'splitted_train']:
can_fit = True
else:
can_fit = False
preprocessor.apply(self, can_fit)
del data_x, data_y
gc.collect()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN_On_Memory(which_set='test',
center=self.center, scale=self.scale,
start=self.start, stop=self.stop,
axes=self.axes, preprocessor=self.preprocessor)
def make_data(self, which_set, path, shuffle=True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}
image_size = 32 * 32 * 3
# For consistency between experiments it is better to make a new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x):
"""reshape data_x to deisng matrix view
"""
data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
return data_x
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x), data_y
def split_train_valid(path, num_valid_train=400,
num_valid_extra=200):
"""
Extract number of class balanced samples from train and extra
sets for validation, and regard the remaining as new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]),
axis=3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate(
(valid_x, data['X'][:, :, :, valid_index]),
axis=3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x), train_y,\
design_matrix_view(valid_x), valid_y
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
return data_x, data_y
|
GbalsaC/bitnamiP
|
refs/heads/master
|
venv/lib/python2.7/site-packages/lepl/stream/_test/simple.py
|
2
|
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
from lepl.support.lib import fmt
from lepl._test.base import BaseTest
from lepl.stream.core import s_empty, s_fmt, s_line, s_next, s_stream
from lepl.stream.factory import DEFAULT_STREAM_FACTORY
class GenericTest(BaseTest):
def test_empty(self):
f = DEFAULT_STREAM_FACTORY
for (constructor, data) in ((f.from_sequence, ''),
(f.from_sequence, []),
(f.from_sequence, ()),
(f.from_string, ''),
(f.from_list, [])):
s = constructor(data)
assert s_empty(s)
try:
s_next(s)
assert False, fmt('expected error: {0}', s)
except StopIteration:
pass
try:
s_line(s, False)
assert False, fmt('expected error: {0}', s)
except StopIteration:
pass
def test_single_value(self):
f = DEFAULT_STREAM_FACTORY
for (constructor, data) in ((f.from_sequence, 'a'),
(f.from_sequence, [1]),
(f.from_sequence, (2,)),
(f.from_string, 'b'),
(f.from_list, ['c'])):
s = constructor(data)
assert not s_empty(s)
(value, n) = s_next(s)
assert value == data
assert s_empty(n)
(line, n) = s_line(s, False)
assert line == data
assert s_empty(n)
def test_two_values(self):
f = DEFAULT_STREAM_FACTORY
for (constructor, data) in ((f.from_sequence, 'ab'),
(f.from_sequence, [1, 2]),
(f.from_sequence, (2,3)),
(f.from_string, 'bc'),
(f.from_list, ['c', 6])):
s = constructor(data)
assert not s_empty(s)
(value, n) = s_next(s)
assert value == data[0:1]
(value, n) = s_next(n)
assert value == data[1:2]
assert s_empty(n)
(line, n) = s_line(s, False)
assert line == data
assert s_empty(n)
def test_string_lines(self):
f = DEFAULT_STREAM_FACTORY
s = f.from_string('line 1\nline 2\nline 3\n')
(l, s) = s_line(s, False)
assert l == 'line 1\n', l
(l, _) = s_line(s, False)
assert l == 'line 2\n', repr(l)
locn = s_fmt(s, '{location}')
assert locn == 'line 2, character 1', locn
sl = s_stream(s, l)
(_, sl) = s_next(sl, count=2)
locn = s_fmt(sl, '{location}')
assert locn == 'line 2, character 3', locn
|
redhat-cip/python-tripleo-wrapper
|
refs/heads/master
|
rdomhelper/shell.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import click
import yaml
import datetime
import logging
import os
import sys
import traceback
from dciclient.v1.api import context as dcicontext
from dciclient.v1.api import job as dcijob
from dciclient.v1.api import jobstate as dcijobstate
import rdomhelper.host0
from rdomhelper import logger
from rdomhelper.provisioners.openstack import os_libvirt
from rdomhelper.provisioners.openstack import utils as os_utils
from rdomhelper import undercloud
LOG = logging.getLogger('__chainsaw__')
def deploy_host0(os_auth_url, os_username, os_password, os_tenant_name, config):
provisioner = config['provisioner']
if provisioner['type'] == 'openstack':
LOG.info("using 'openstack' provisioner")
nova_api = os_utils.build_nova_api(os_auth_url, os_username,
os_password, os_tenant_name)
image_id_to_boot_from = os_utils.get_image_id(nova_api,
provisioner['image']['name'])
flavor_id = os_utils.get_flavor_id(nova_api, provisioner['flavor'])
keypair_id = os_utils.get_keypair_id(nova_api, provisioner['keypair'])
network_id = os_utils.get_network_id(nova_api, provisioner['network'])
nics = [{'net-id': network_id}]
instance_name = "%s-%s" % (provisioner['instance_name_prefix'],
str(datetime.datetime.utcnow()))
LOG.info("building instance '%s'" % instance_name)
os_instance = os_libvirt.build_openstack_instance(
nova_api,
instance_name,
image_id_to_boot_from,
flavor_id,
keypair_id,
nics)
if os_instance:
host0_ip = os_utils.add_a_floating_ip(nova_api, os_instance)
LOG.info("add floating ip '%s'" % host0_ip)
os_utils.add_security_groups(os_instance,
provisioner['security-groups'])
LOG.info("add security groups '%s'" %
provisioner['security-groups'])
LOG.info("instance '%s' ready to use" % instance_name)
else:
LOG.error("instance '%s' failed" % instance_name)
sys.exit(1)
host0 = rdomhelper.host0.Host0(hostname=host0_ip,
user=config['provisioner']['image'].get('user', 'root'),
key_filename=config['ssh']['private_key'])
host0.rhsm_register(
config['rhsm']['login'],
config['rhsm'].get('password', os.environ.get('RHN_PW')),
config['rhsm']['pool_id'])
return host0
@click.command()
@click.option('--os-auth-url', envvar='OS_AUTH_URL', required=True,
help="Keystone auth url.")
@click.option('--os-username', envvar='OS_USERNAME', required=True,
help="Openstack username account.")
@click.option('--os-password', envvar='OS_PASSWORD', required=True,
help="Openstack password account.")
@click.option('--os-tenant-name', envvar='OS_TENANT_NAME', required=True,
help="Openstack tenant name.")
@click.option('--host0-ip', required=False,
help="IP address of a host0 to reuse.")
@click.option('--undercloud-ip', required=False,
help="IP address of an undercloud to reuse.")
@click.option('--config-file', required=True, type=click.File('rb'),
help="Chainsaw path configuration file.")
def cli(os_auth_url, os_username, os_password, os_tenant_name, host0_ip, undercloud_ip, config_file):
config = yaml.load(config_file)
ssh = config['ssh']
host0 = None
vm_undercloud = None
dci_context = dcicontext.build_dci_context(
config['dci']['control_server_url'],
config['dci']['login'],
config['dci']['password'])
logger.setup_logging(dci_context)
status = 'pre-run'
job = dcijob.schedule(dci_context,
remoteci_id=config['dci']['remoteci_id']).json()
job_id = job['job']['id']
try:
if host0_ip:
dcijobstate.create(dci_context, status, 'Reusing existing host0', job_id)
host0 = rdomhelper.host0.Host0(hostname=host0_ip,
user=config['provisioner']['image'].get('user', 'root'),
key_filename=ssh['private_key'])
if undercloud_ip:
dcijobstate.create(dci_context, status, 'Reusing existing undercloud', job_id)
vm_undercloud = undercloud.Undercloud(undercloud_ip,
user='root',
via_ip=host0_ip,
key_filename=ssh['private_key'])
if not host0:
dcijobstate.create(dci_context, status, 'Creating the host0', job_id)
host0 = deploy_host0(os_auth_url, os_username, os_password,
os_tenant_name, config)
if not vm_undercloud:
dcijobstate.create(dci_context, status, 'Creating the undercloud', job_id)
host0.enable_repositories(config['provisioner']['repositories'])
host0.install_nosync()
host0.create_stack_user()
host0.deploy_hypervisor()
vm_undercloud = host0.instack_virt_setup(
config['undercloud']['guest_image_path'],
config['undercloud']['guest_image_checksum'],
rhsm_login=config['rhsm']['login'],
rhsm_password=config['rhsm'].get('password', os.environ.get('RHN_PW')))
status = 'running'
dcijobstate.create(dci_context, status, 'Configuring the undercloud', job_id)
vm_undercloud.enable_repositories(config['undercloud']['repositories'])
vm_undercloud.install_nosync()
vm_undercloud.create_stack_user()
vm_undercloud.install_base_packages()
vm_undercloud.clean_system()
vm_undercloud.update_packages()
vm_undercloud.install_osp()
vm_undercloud.start_overcloud()
dcijobstate.create(dci_context, 'success', 'Job succeed :-)', job_id)
except Exception as e:
LOG.error(traceback.format_exc())
dcijobstate.create(dci_context, 'failure', 'Job failed :-(', job_id)
raise e
# This is for setuptools entry point.
main = cli
|
Gheehnest/three.js
|
refs/heads/master
|
utils/converters/ctm/join_ctm.py
|
399
|
"""Join multiple binary files into single file and generate JSON snippet with offsets
-------------------------------------
How to use
-------------------------------------
python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js]
Will read multiple files following wildcard pattern (ordered lexicographically):
part_000.ctm
part_001.ctm
part_002.ctm
...
part_XXX.ctm
And generate single concatenated files:
joined.ctm
offsets.js (optional, offsets are also dumped to standard output)
"""
import getopt
import glob
import sys
import os
# #####################################################
# Templates
# #####################################################
TEMPLATE_JSON = u"""\
"offsets": [ %(offsets)s ],
"""
# #############################################################################
# Helpers
# #############################################################################
def usage():
print 'Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="])
except getopt.GetoptError:
usage()
sys.exit(2)
inpattern = ""
outname = ""
jsonname = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
inpattern = a
elif o in ("-o", "--output"):
outname = a
elif o in ("-j", "--json"):
jsonname = a
# quit if required parameters are missing
if inpattern == "" or outname == "":
usage()
sys.exit(2)
outfile = open(outname, "wb")
matches = glob.glob(inpattern)
matches.sort()
total = 0
offsets = []
for filename in matches:
filesize = os.path.getsize(filename)
offsets.append(total)
total += filesize
print filename, filesize
infile = open(filename, "rb")
buffer = infile.read()
outfile.write(buffer)
infile.close()
outfile.close()
json_str = TEMPLATE_JSON % {
"offsets" : ", ".join(["%d" % o for o in offsets])
}
print json_str
if jsonname:
jsonfile = open(jsonname, "w")
jsonfile.write(json_str)
jsonfile.close()
|
cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools
|
refs/heads/master
|
git_retry.py
|
31
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import subprocess
import sys
import threading
import time
from git_common import GIT_EXE, GIT_TRANSIENT_ERRORS_RE
class TeeThread(threading.Thread):
def __init__(self, fd, out_fd, name):
super(TeeThread, self).__init__(name='git-retry.tee.%s' % (name,))
self.data = None
self.fd = fd
self.out_fd = out_fd
def run(self):
chunks = []
for line in self.fd:
chunks.append(line)
self.out_fd.write(line)
self.data = ''.join(chunks)
class GitRetry(object):
logger = logging.getLogger('git-retry')
DEFAULT_DELAY_SECS = 3.0
DEFAULT_RETRY_COUNT = 5
def __init__(self, retry_count=None, delay=None, delay_factor=None):
self.retry_count = retry_count or self.DEFAULT_RETRY_COUNT
self.delay = max(delay, 0) if delay else 0
self.delay_factor = max(delay_factor, 0) if delay_factor else 0
def shouldRetry(self, stderr):
m = GIT_TRANSIENT_ERRORS_RE.search(stderr)
if not m:
return False
self.logger.info("Encountered known transient error: [%s]",
stderr[m.start(): m.end()])
return True
@staticmethod
def execute(*args):
args = (GIT_EXE,) + args
proc = subprocess.Popen(
args,
stderr=subprocess.PIPE,
)
stderr_tee = TeeThread(proc.stderr, sys.stderr, 'stderr')
# Start our process. Collect/tee 'stdout' and 'stderr'.
stderr_tee.start()
try:
proc.wait()
except KeyboardInterrupt:
proc.kill()
raise
finally:
stderr_tee.join()
return proc.returncode, None, stderr_tee.data
def computeDelay(self, iteration):
"""Returns: the delay (in seconds) for a given iteration
The first iteration has a delay of '0'.
Args:
iteration: (int) The iteration index (starting with zero as the first
iteration)
"""
if (not self.delay) or (iteration == 0):
return 0
if self.delay_factor == 0:
# Linear delay
return iteration * self.delay
# Exponential delay
return (self.delay_factor ** (iteration - 1)) * self.delay
def __call__(self, *args):
returncode = 0
for i in xrange(self.retry_count):
# If the previous run failed and a delay is configured, delay before the
# next run.
delay = self.computeDelay(i)
if delay > 0:
self.logger.info("Delaying for [%s second(s)] until next retry", delay)
time.sleep(delay)
self.logger.debug("Executing subprocess (%d/%d) with arguments: %s",
(i+1), self.retry_count, args)
returncode, _, stderr = self.execute(*args)
self.logger.debug("Process terminated with return code: %d", returncode)
if returncode == 0:
break
if not self.shouldRetry(stderr):
self.logger.error("Process failure was not known to be transient; "
"terminating with return code %d", returncode)
break
return returncode
def main(args):
parser = optparse.OptionParser()
parser.disable_interspersed_args()
parser.add_option('-v', '--verbose',
action='count', default=0,
help="Increase verbosity; can be specified multiple times")
parser.add_option('-c', '--retry-count', metavar='COUNT',
type=int, default=GitRetry.DEFAULT_RETRY_COUNT,
help="Number of times to retry (default=%default)")
parser.add_option('-d', '--delay', metavar='SECONDS',
type=float, default=GitRetry.DEFAULT_DELAY_SECS,
help="Specifies the amount of time (in seconds) to wait "
"between successive retries (default=%default). This "
"can be zero.")
parser.add_option('-D', '--delay-factor', metavar='FACTOR',
type=int, default=2,
help="The exponential factor to apply to delays in between "
"successive failures (default=%default). If this is "
"zero, delays will increase linearly. Set this to "
"one to have a constant (non-increasing) delay.")
opts, args = parser.parse_args(args)
# Configure logging verbosity
if opts.verbose == 0:
logging.getLogger().setLevel(logging.WARNING)
elif opts.verbose == 1:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.DEBUG)
# Execute retries
retry = GitRetry(
retry_count=opts.retry_count,
delay=opts.delay,
delay_factor=opts.delay_factor,
)
return retry(*args)
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.WARNING)
try:
sys.exit(main(sys.argv[2:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
chrrrles/ansible-modules-extras
|
refs/heads/devel
|
system/firewalld.py
|
32
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Adam Miller (maxamillion@fedoraproject.org)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services."
required: false
default: null
port:
description:
- "Name of a port or port range to add/remove to/from firewalld. Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges."
required: false
default: null
rich_rule:
description:
- "Rich rule to add/remove to/from firewalld."
required: false
default: null
source:
description:
- 'The source/network you would like to add/remove to/from firewalld'
required: false
default: null
version_added: "2.0"
zone:
description:
- 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- "Should this configuration be in the running firewalld configuration or persist across reboots."
required: true
immediate:
description:
- "Should this configuration be applied immediately, if set as permanent"
required: false
default: false
version_added: "1.9"
state:
description:
- "Should this port accept(enabled) or reject(disabled) connections."
required: true
choices: [ "enabled", "disabled" ]
timeout:
description:
- "The amount of time the rule should be in effect for when non-permanent."
required: false
default: 0
notes:
- Not tested on any Debian based system.
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = '''
- firewalld: service=https permanent=true state=enabled
- firewalld: port=8081/tcp permanent=true state=disabled
- firewalld: port=161-162/udp permanent=true state=enabled
- firewalld: zone=dmz service=http permanent=true state=enabled
- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled
- firewalld: source='192.168.1.0/24' zone=internal state=enabled
'''
import os
import re
try:
import firewall.config
FW_VERSION = firewall.config.VERSION
from firewall.client import FirewallClient
fw = FirewallClient()
HAS_FIREWALLD = True
except ImportError:
HAS_FIREWALLD = False
################
# port handling
#
def get_port_enabled(zone, port_proto):
if port_proto in fw.getPorts(zone):
return True
else:
return False
def set_port_enabled(zone, port, protocol, timeout):
fw.addPort(zone, port, protocol, timeout)
def set_port_disabled(zone, port, protocol):
fw.removePort(zone, port, protocol)
def get_port_enabled_permanent(zone, port_proto):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if tuple(port_proto) in fw_settings.getPorts():
return True
else:
return False
def set_port_enabled_permanent(zone, port, protocol):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.addPort(port, protocol)
fw_zone.update(fw_settings)
def set_port_disabled_permanent(zone, port, protocol):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removePort(port, protocol)
fw_zone.update(fw_settings)
####################
# source handling
#
def get_source(zone, source):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if source in fw_settings.getSources():
return True
else:
return False
def add_source(zone, source):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.addSource(source)
fw_zone.update(fw_settings)
def remove_source(zone, source):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removeSource(source)
fw_zone.update(fw_settings)
####################
# service handling
#
def get_service_enabled(zone, service):
if service in fw.getServices(zone):
return True
else:
return False
def set_service_enabled(zone, service, timeout):
fw.addService(zone, service, timeout)
def set_service_disabled(zone, service):
fw.removeService(zone, service)
def get_service_enabled_permanent(zone, service):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if service in fw_settings.getServices():
return True
else:
return False
def set_service_enabled_permanent(zone, service):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.addService(service)
fw_zone.update(fw_settings)
def set_service_disabled_permanent(zone, service):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removeService(service)
fw_zone.update(fw_settings)
####################
# rich rule handling
#
def get_rich_rule_enabled(zone, rule):
if rule in fw.getRichRules(zone):
return True
else:
return False
def set_rich_rule_enabled(zone, rule, timeout):
fw.addRichRule(zone, rule, timeout)
def set_rich_rule_disabled(zone, rule):
fw.removeRichRule(zone, rule)
def get_rich_rule_enabled_permanent(zone, rule):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
if rule in fw_settings.getRichRules():
return True
else:
return False
def set_rich_rule_enabled_permanent(zone, rule):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.addRichRule(rule)
fw_zone.update(fw_settings)
def set_rich_rule_disabled_permanent(zone, rule):
fw_zone = fw.config().getZoneByName(zone)
fw_settings = fw_zone.getSettings()
fw_settings.removeRichRule(rule)
fw_zone.update(fw_settings)
def main():
module = AnsibleModule(
argument_spec = dict(
service=dict(required=False,default=None),
port=dict(required=False,default=None),
rich_rule=dict(required=False,default=None),
zone=dict(required=False,default=None),
immediate=dict(type='bool',default=False),
source=dict(required=False,default=None),
permanent=dict(type='bool',required=False,default=None),
state=dict(choices=['enabled', 'disabled'], required=True),
timeout=dict(type='int',required=False,default=0),
),
supports_check_mode=True
)
if module.params['source'] == None and module.params['permanent'] == None:
module.fail_json(msg='permanent is a required parameter')
if not HAS_FIREWALLD:
module.fail_json(msg='firewalld required for this module')
## Pre-run version checking
if FW_VERSION < "0.2.11":
module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11')
## Global Vars
changed=False
msgs = []
service = module.params['service']
rich_rule = module.params['rich_rule']
source = module.params['source']
if module.params['port'] != None:
port, protocol = module.params['port'].split('/')
if protocol == None:
module.fail_json(msg='improper port format (missing protocol?)')
else:
port = None
if module.params['zone'] != None:
zone = module.params['zone']
else:
zone = fw.getDefaultZone()
permanent = module.params['permanent']
desired_state = module.params['state']
immediate = module.params['immediate']
timeout = module.params['timeout']
## Check for firewalld running
try:
if fw.connected == False:
module.fail_json(msg='firewalld service must be running')
except AttributeError:
module.fail_json(msg="firewalld connection can't be established,\
version likely too old. Requires firewalld >= 0.2.11")
modification_count = 0
if service != None:
modification_count += 1
if port != None:
modification_count += 1
if rich_rule != None:
modification_count += 1
if modification_count > 1:
module.fail_json(msg='can only operate on port, service or rich_rule at once')
if service != None:
if permanent:
is_enabled = get_service_enabled_permanent(zone, service)
msgs.append('Permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_service_enabled_permanent(zone, service)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_service_disabled_permanent(zone, service)
changed=True
if immediate or not permanent:
is_enabled = get_service_enabled(zone, service)
msgs.append('Non-permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_service_enabled(zone, service, timeout)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_service_disabled(zone, service)
changed=True
if changed == True:
msgs.append("Changed service %s to %s" % (service, desired_state))
if source != None:
is_enabled = get_source(zone, source)
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
add_source(zone, source)
changed=True
msgs.append("Added %s to zone %s" % (source, zone))
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
remove_source(zone, source)
changed=True
msgs.append("Removed %s from zone %s" % (source, zone))
if port != None:
if permanent:
is_enabled = get_port_enabled_permanent(zone, [port, protocol])
msgs.append('Permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_port_enabled_permanent(zone, port, protocol)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_port_disabled_permanent(zone, port, protocol)
changed=True
if immediate or not permanent:
is_enabled = get_port_enabled(zone, [port,protocol])
msgs.append('Non-permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_port_enabled(zone, port, protocol, timeout)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_port_disabled(zone, port, protocol)
changed=True
if changed == True:
msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \
desired_state))
if rich_rule != None:
if permanent:
is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule)
msgs.append('Permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_rich_rule_enabled_permanent(zone, rich_rule)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_rich_rule_disabled_permanent(zone, rich_rule)
changed=True
if immediate or not permanent:
is_enabled = get_rich_rule_enabled(zone, rich_rule)
msgs.append('Non-permanent operation')
if desired_state == "enabled":
if is_enabled == False:
if module.check_mode:
module.exit_json(changed=True)
set_rich_rule_enabled(zone, rich_rule, timeout)
changed=True
elif desired_state == "disabled":
if is_enabled == True:
if module.check_mode:
module.exit_json(changed=True)
set_rich_rule_disabled(zone, rich_rule)
changed=True
if changed == True:
msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
module.exit_json(changed=changed, msg=', '.join(msgs))
#################################################
# import module snippets
from ansible.module_utils.basic import *
main()
|
svenstaro/ansible
|
refs/heads/devel
|
test/units/plugins/__init__.py
|
12133432
| |
gacarrillor/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsproviderconnection_mssql.py
|
19
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for MSSQL QgsAbastractProviderConnection API.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '12/03/2020'
__copyright__ = 'Copyright 2019, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from test_qgsproviderconnection_base import TestPyQgsProviderConnectionBase
from qgis.core import (
QgsVectorLayer,
QgsProviderRegistry,
QgsDataSourceUri,
)
from qgis.testing import unittest
class TestPyQgsProviderConnectionMssql(unittest.TestCase, TestPyQgsProviderConnectionBase):
# Provider test cases must define the string URI for the test
uri = ''
# Provider test cases must define the provider name (e.g. "postgres" or "ogr")
providerKey = 'mssql'
@classmethod
def setUpClass(cls):
"""Run before all tests"""
TestPyQgsProviderConnectionBase.setUpClass()
# These are the connection details for the SQL Server instance running on Travis
cls.dbconn = "service='testsqlserver' user=sa password='<YourStrong!Passw0rd>' "
if 'QGIS_MSSQLTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_MSSQLTEST_DB']
cls.uri = cls.dbconn
try:
md = QgsProviderRegistry.instance().providerMetadata('mssql')
conn = md.createConnection(cls.uri, {})
conn.executeSql('drop schema [myNewSchema]')
except:
pass
def test_configuration(self):
"""Test storage and retrieval for configuration parameters"""
uri = 'dbname=\'qgis_test\' service=\'driver={SQL Server};server=localhost;port=1433;database=qgis_test\' user=\'sa\' password=\'<YourStrong!Passw0rd>\' srid=4326 type=Point estimatedMetadata=\'true\' disableInvalidGeometryHandling=\'1\' table="qgis_test"."someData" (geom)'
md = QgsProviderRegistry.instance().providerMetadata('mssql')
conn = md.createConnection(uri, {})
ds_uri = QgsDataSourceUri(conn.uri())
self.assertEqual(ds_uri.username(), 'sa')
self.assertEqual(ds_uri.database(), 'qgis_test')
self.assertEqual(ds_uri.table(), '')
self.assertEqual(ds_uri.schema(), '')
self.assertEqual(ds_uri.geometryColumn(), '')
self.assertTrue(ds_uri.useEstimatedMetadata())
self.assertEqual(ds_uri.srid(), '')
self.assertEqual(ds_uri.password(), '<YourStrong!Passw0rd>')
self.assertEqual(ds_uri.param('disableInvalidGeometryHandling'), '1')
conn.store('coronavirus')
conn = md.findConnection('coronavirus', False)
ds_uri = QgsDataSourceUri(conn.uri())
self.assertEqual(ds_uri.username(), 'sa')
self.assertEqual(ds_uri.database(), 'qgis_test')
self.assertEqual(ds_uri.table(), '')
self.assertEqual(ds_uri.schema(), '')
self.assertTrue(ds_uri.useEstimatedMetadata())
self.assertEqual(ds_uri.geometryColumn(), '')
self.assertEqual(ds_uri.srid(), '')
self.assertEqual(ds_uri.password(), '<YourStrong!Passw0rd>')
self.assertEqual(ds_uri.param('disableInvalidGeometryHandling'), 'true')
conn.remove('coronavirus')
def test_mssql_connections_from_uri(self):
"""Create a connection from a layer uri and retrieve it"""
md = QgsProviderRegistry.instance().providerMetadata('mssql')
def test_table_uri(self):
"""Create a connection from a layer uri and create a table URI"""
md = QgsProviderRegistry.instance().providerMetadata('mssql')
conn = md.createConnection(self.uri, {})
vl = QgsVectorLayer(conn.tableUri('qgis_test', 'someData'), 'my', 'mssql')
self.assertTrue(vl.isValid())
def test_mssql_fields(self):
"""Test fields"""
md = QgsProviderRegistry.instance().providerMetadata('mssql')
conn = md.createConnection(self.uri, {})
fields = conn.fields('qgis_test', 'someData')
self.assertEqual(fields.names(), ['pk', 'cnt', 'name', 'name2', 'num_char', 'dt', 'date', 'time'])
def test_schemas_filtering(self):
"""Test schemas filtering"""
md = QgsProviderRegistry.instance().providerMetadata('mssql')
conn = md.createConnection(self.uri, {})
schemas = conn.schemas()
self.assertEqual(len(schemas), 2)
self.assertEqual(schemas, ['dbo', 'qgis_test'])
filterUri = QgsDataSourceUri(self.uri)
filterUri.setParam('excludedSchemas', 'dbo')
conn = md.createConnection(filterUri.uri(), {})
schemas = conn.schemas()
self.assertEqual(len(schemas), 1)
self.assertEqual(schemas, ['qgis_test'])
# Store the connection
conn.store('filteredConnection')
otherConn = md.createConnection('filteredConnection')
schemas = otherConn.schemas()
self.assertEqual(len(schemas), 1)
self.assertEqual(schemas, ['qgis_test'])
if __name__ == '__main__':
unittest.main()
|
briandrawert/pyurdme
|
refs/heads/master
|
examples/yeast_polarization/polarisome_model.py
|
5
|
#!/usr/bin/env python
""" pyURDME model file for the model found in Lawson et al. PloS Comp Bio (2013). """
import os
import pyurdme
import dolfin
import math
import matplotlib.pyplot as plt
import numpy
class Cdc42(pyurdme.URDMEDataFunction):
def __init__(self, a=-4*numpy.pi, b=4*numpy.pi, N=160):
""" 1D domain from a to b. """
pyurdme.URDMEDataFunction.__init__(self, name="Cdc42")
self.a = a
self.b = b
self.N = N
def map(self, x):
#ligand_c[i] = 100*Gradient_max*exp( (-1*pow((i-floor(N/2))*360.0/N,2))/(2*pow(Gradient_sigma,2)) );
# x[0] == i*l
Gradient_max = 3.0*160/self.N
Gradient_max = Gradient_max*0.7917
Gradient_sigma = 20.3837
return 100*Gradient_max*numpy.exp(
-1*((x[0]*(360)/(self.b - self.a))**2) / (2*Gradient_sigma**2)
)
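# A minimal sketch, not part of the original model file: it samples the Cdc42
# gradient defined above on a regular grid for a quick sanity check. The helper
# name and the use of numpy.linspace are assumptions, not pyurdme API.
def _sample_cdc42_gradient(num_points=160, a=-4*numpy.pi, b=4*numpy.pi):
    """Return (x, Cdc42(x)) sampled at num_points positions on [a, b]."""
    cdc42 = Cdc42(a=a, b=b, N=num_points)
    xs = numpy.linspace(a, b, num_points)
    # Cdc42.map() expects an indexable coordinate, so wrap each scalar in a list.
    return xs, numpy.array([cdc42.map([x]) for x in xs])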
class polarisome_1D(pyurdme.URDMEModel):
def __init__(self,model_name="polarisome_1D"):
pyurdme.URDMEModel.__init__(self,model_name)
default_D = 0.0053
fast_D = 1000*default_D
# Species
Bni1c = pyurdme.Species(name="Bni1c", diffusion_constant=fast_D)
Bni1m = pyurdme.Species(name="Bni1m", diffusion_constant=default_D)
Spa2c = pyurdme.Species(name="Spa2c", diffusion_constant=fast_D)
Spa2m = pyurdme.Species(name="Spa2m", diffusion_constant=default_D)
Actinc = pyurdme.Species(name="Actinc", diffusion_constant=fast_D)
Actinm = pyurdme.Species(name="Actinm", diffusion_constant=default_D)
self.add_species([Bni1c, Bni1m, Spa2c, Spa2m, Actinc, Actinm])
NUM_VOXEL = 160
self.mesh = pyurdme.URDMEMesh.generate_interval_mesh(nx=NUM_VOXEL, a=-4*numpy.pi, b=4*numpy.pi, periodic=True)
Bon = pyurdme.Parameter(name="Bon", expression=1.6e-6)
Boff = pyurdme.Parameter(name="Boff", expression=0.25)
Bfb = pyurdme.Parameter(name="Bfb", expression=1.9e-5)
Aon = pyurdme.Parameter(name="Aon", expression=7.7e-5)
Aoff = pyurdme.Parameter(name="Aoff", expression=0.018)
Km = pyurdme.Parameter(name="Km", expression=3500)
Son = pyurdme.Parameter(name="Son", expression=0.16)
Soff = pyurdme.Parameter(name="Soff", expression=0.35)
self.add_parameter([Bon, Boff, Bfb, Aon, Aoff, Km, Son, Soff])
# Add Data Function to model the mating pheromone gradient.
self.add_data_function(Cdc42())
# Reactions
R0 = pyurdme.Reaction(name="R0", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bon*Bni1c*NUM_VOXELS*Cdc42")
R1 = pyurdme.Reaction(name="R1", reactants={Bni1m:1}, products={Bni1c:1}, massaction=True, rate=Boff)
R2 = pyurdme.Reaction(name="R2", reactants={Actinc:1}, products={Actinm:1}, propensity_function="Aon*Bni1m*Actinc*NUM_VOXELS")
R3 = pyurdme.Reaction(name="R3", reactants={Actinm:1}, products={Actinc:1}, propensity_function="Aoff*Km/(Km+Spa2m)*Actinm")
R4 = pyurdme.Reaction(name="R4", reactants={Spa2c:1}, products={Spa2m:1}, propensity_function="Son*Spa2c*NUM_VOXELS*Actinm")
R5 = pyurdme.Reaction(name="R5", reactants={Spa2m:1}, products={Spa2c:1}, massaction=True, rate=Soff)
R6 = pyurdme.Reaction(name="R6", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bfb*Bni1c*NUM_VOXELS*Spa2m")
self.add_reaction([R0,R1,R2,R3,R4,R5,R6])
# Distribute molecules randomly over the mesh according to their initial values
self.set_initial_condition_scatter({Bni1c:1000})
self.set_initial_condition_scatter({Spa2c:5000})
self.set_initial_condition_scatter({Actinc:40})
#self.timespan(range(0,3601,30))
self.timespan(range(0,201,10))
if __name__=="__main__":
""" Dump model to a file. """
model = polarisome_1D()
result = model.run()
x_vals = model.mesh.coordinates()[:, 0]
Bni1 = result.get_species("Bni1m", timepoints=20)
Spa2 = result.get_species("Spa2m", timepoints=20)
plt.plot(x_vals, Spa2)
plt.title('Spa2_m at t={0}'.format(model.tspan[20]))
plt.show()
|
lshain-android-source/external-chromium_org-tools-gyp
|
refs/heads/master
|
test/same-source-file-name/gyptest-fail-shared.py
|
149
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that gyp fails on shared_library targets which have several files with
the same basename.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('double-shared.gyp', chdir='src', status=1, stderr=None)
test.pass_test()
|
dforsyth/mesos
|
refs/heads/master
|
src/python/interface/src/mesos/interface/__init__.py
|
7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# See include/mesos/scheduler.hpp, include/mesos/executor.hpp and
# include/mesos/mesos.proto for more information documenting this
# interface.
"""Python bindings for Mesos."""
from __future__ import print_function
import sys
__all__ = (
'Executor',
'ExecutorDriver',
'Scheduler',
'SchedulerDriver',
)
class Scheduler(object):
"""
Base class for Mesos schedulers. Users' schedulers should extend this
class to get default implementations of methods they don't override.
"""
def registered(self, driver, frameworkId, masterInfo):
"""
Invoked when the scheduler successfully registers with a Mesos master.
It is called with the frameworkId, a unique ID generated by the
master, and the masterInfo which is information about the master
itself.
"""
def reregistered(self, driver, masterInfo):
"""
Invoked when the scheduler reregisters with a newly elected Mesos
master. This is only called when the scheduler has previously been
registered. masterInfo contains information about the newly elected
master.
"""
def disconnected(self, driver):
"""
Invoked when the scheduler becomes disconnected from the master, e.g.
the master fails and another is taking over.
"""
def resourceOffers(self, driver, offers):
"""
Invoked when resources have been offered to this framework. A single
offer will only contain resources from a single slave. Resources
associated with an offer will not be re-offered to _this_ framework
until either (a) this framework has rejected those resources (see
SchedulerDriver.launchTasks) or (b) those resources have been
rescinded (see Scheduler.offerRescinded). Note that resources may be
concurrently offered to more than one framework at a time (depending
on the allocator being used). In that case, the first framework to
launch tasks using those resources will be able to use them while the
other frameworks will have those resources rescinded (or if a
framework has already launched tasks with those resources then those
tasks will fail with a TASK_LOST status and a message saying as much).
"""
def offerRescinded(self, driver, offerId):
"""
Invoked when an offer is no longer valid (e.g., the slave was lost or
another framework used resources in the offer.) If for whatever reason
an offer is never rescinded (e.g., dropped message, failing over
framework, etc.), a framework that attempts to launch tasks using an
invalid offer will receive TASK_LOST status updates for those tasks
(see Scheduler.resourceOffers).
"""
def statusUpdate(self, driver, status):
"""
Invoked when the status of a task has changed (e.g., a slave is
lost and so the task is lost, a task finishes and an executor
sends a status update saying so, etc). If implicit
acknowledgements are being used, then returning from this
callback _acknowledges_ receipt of this status update! If for
whatever reason the scheduler aborts during this callback (or
the process exits) another status update will be delivered (note,
however, that this is currently not true if the slave sending the
status update is lost/fails during that time). If explicit
acknowledgements are in use, the scheduler must acknowledge this
status on the driver.
"""
def frameworkMessage(self, driver, executorId, slaveId, message):
"""
Invoked when an executor sends a message. These messages are best
effort; do not expect a framework message to be retransmitted in any
reliable fashion.
"""
def slaveLost(self, driver, slaveId):
"""
Invoked when a slave has been determined unreachable (e.g., machine
failure, network partition.) Most frameworks will need to reschedule
any tasks launched on this slave on a new slave.
NOTE: This callback is not reliably delivered. If a host or
network failure causes messages between the master and the
scheduler to be dropped, this callback may not be invoked.
"""
def executorLost(self, driver, executorId, slaveId, status):
"""
Invoked when an executor has exited/terminated. Note that any tasks
running will have TASK_LOST status updates automatically generated.
NOTE: This callback is not reliably delivered. If a host or
network failure causes messages between the master and the
scheduler to be dropped, this callback may not be invoked.
"""
def error(self, driver, message):
"""
Invoked when there is an unrecoverable error in the scheduler or
scheduler driver. The driver will be aborted BEFORE invoking this
callback.
"""
print("Error from Mesos: %s " % message, file=sys.stderr)
class SchedulerDriver(object):
"""
Interface for Mesos scheduler drivers. Users may wish to implement this
class in mock objects for tests.
"""
def start(self):
"""
Starts the scheduler driver. This needs to be called before any other
driver calls are made.
"""
def stop(self, failover=False):
"""
Stops the scheduler driver. If the 'failover' flag is set to False
then it is expected that this framework will never reconnect to Mesos
and all of its executors and tasks can be terminated. Otherwise, all
executors and tasks will remain running (for some framework specific
failover timeout) allowing the scheduler to reconnect (possibly in the
same process, or from a different process, for example, on a different
machine.)
"""
def abort(self):
"""
Aborts the driver so that no more callbacks can be made to the
scheduler. The semantics of abort and stop have deliberately been
separated so that code can detect an aborted driver (i.e., via the
return status of SchedulerDriver.join), and instantiate and start
another driver if desired (from within the same process.)
"""
def join(self):
"""
Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely. The return status of this function can
be used to determine if the driver was aborted (see mesos.proto for a
description of Status).
"""
def run(self):
"""
Starts and immediately joins (i.e., blocks on) the driver.
"""
def requestResources(self, requests):
"""
Requests resources from Mesos (see mesos.proto for a description of
Request and how, for example, to request resources from specific
slaves.) Any resources available are offered to the framework via
Scheduler.resourceOffers callback, asynchronously.
"""
def launchTasks(self, offerIds, tasks, filters=None):
"""
Launches the given set of tasks. Any remaining resources (i.e.,
those that are not used by the launched tasks or their executors)
will be considered declined. Note that this includes resources
used by tasks that the framework attempted to launch but failed
(with TASK_ERROR) due to a malformed task description. The
specified filters are applied on all unused resources (see
mesos.proto for a description of Filters). Available resources
are aggregated when multiple offers are provided. Note that all
offers must belong to the same slave. Invoking this function with
an empty collection of tasks declines offers in their entirety
(see Scheduler.declineOffer). Note that passing a single offer
is also supported.
"""
def killTask(self, taskId):
"""
Kills the specified task. Note that attempting to kill a task is
currently not reliable. If, for example, a scheduler fails over while
it was attempting to kill a task it will need to retry in the future.
Likewise, if unregistered / disconnected, the request will be
dropped (these semantics may be changed in the future).
"""
def acceptOffers(self, offerIds, operations, filters=None):
"""
Accepts the given offers and performs a sequence of operations on
those accepted offers. See Offer.Operation in mesos.proto for the
set of available operations. Any remaining resources (i.e., those
that are not used by the launched tasks or their executors) will
be considered declined. Note that this includes resources used by
tasks that the framework attempted to launch but failed (with
TASK_ERROR) due to a malformed task description. The specified
filters are applied on all unused resources (see mesos.proto for
a description of Filters). Available resources are aggregated
when multiple offers are provided. Note that all offers must
belong to the same slave.
"""
def declineOffer(self, offerId, filters=None):
"""
Declines an offer in its entirety and applies the specified
filters on the resources (see mesos.proto for a description of
Filters). Note that this can be done at any time, it is not
necessary to do this within the Scheduler::resourceOffers
callback.
"""
def reviveOffers(self, roles=None):
"""
Removes filters either for all roles of the framework (if 'roles'
is None) or for the specified roles and removes these roles from
the suppressed set. If the framework is not connected to the master,
an up-to-date set of suppressed roles will be sent to the master
during re-registration.
NOTE: If 'roles' is an empty iterable, this method does nothing.
"""
def suppressOffers(self, roles=None):
"""
Informs Mesos master to stop sending offers either for all roles
of the framework (if 'roles' is None) or for the specified 'roles'
of the framework (i.e. to suppress these roles). To resume getting
offers, the scheduler can call reviveOffers() or set the suppressed
roles explicitly via updateFramework().
NOTE: If the framework is not connected to the master, an up-to-date set
of suppressed roles will be sent to the master during re-registration.
NOTE: If `roles` is an empty iterable, this method does nothing.
"""
def acknowledgeStatusUpdate(self, status):
"""
Acknowledges the status update. This should only be called
once the status update is processed durably by the scheduler.
Note that explicit acknowledgements must be requested via the
constructor argument, otherwise a call to this method will
cause the driver to crash.
"""
def sendFrameworkMessage(self, executorId, slaveId, data):
"""
Sends a message from the framework to one of its executors. These
messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
def reconcileTasks(self, tasks):
"""
Allows the framework to query the status for non-terminal tasks.
This causes the master to send back the latest task status for
each task in 'statuses', if possible. Tasks that are no longer
known will result in a TASK_LOST update. If statuses is empty,
then the master will send the latest status for each task
currently known.
"""
def updateFramework(self, frameworkInfo, suppressedRoles):
"""
Inform Mesos master about changes to the `FrameworkInfo` and
the set of suppressed roles. The driver will store the new
`FrameworkInfo` and the new set of suppressed roles, and all
subsequent re-registrations will use them.
NOTE: If the supplied info is invalid or fails authorization,
the `error()` callback will be invoked asynchronously (after
the master replies with a `FrameworkErrorMessage`).
NOTE: This must be called after initial registration with the
master completes and the `FrameworkID` is assigned. The assigned
`FrameworkID` must be set in `frameworkInfo`.
NOTE: The `FrameworkInfo.user` and `FrameworkInfo.hostname`
fields will be auto-populated using the same approach used
during driver initialization.
"""
class Executor(object):
"""
Base class for Mesos executors. Users' executors should extend this
class to get default implementations of methods they don't override.
"""
def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
"""
Invoked once the executor driver has been able to successfully connect
with Mesos. In particular, a scheduler can pass some data to its
executors through the FrameworkInfo.ExecutorInfo's data field.
"""
def reregistered(self, driver, slaveInfo):
"""
Invoked when the executor reregisters with a restarted slave.
"""
def disconnected(self, driver):
"""
Invoked when the executor becomes "disconnected" from the slave (e.g.,
the slave is being restarted due to an upgrade).
"""
def launchTask(self, driver, task):
"""
Invoked when a task has been launched on this executor (initiated via
Scheduler.launchTasks). Note that this task can be realized with a
thread, a process, or some simple computation, however, no other
callbacks will be invoked on this executor until this callback has
returned.
"""
def killTask(self, driver, taskId):
"""
Invoked when a task running within this executor has been killed (via
SchedulerDriver.killTask). Note that no status update will be sent on
behalf of the executor, the executor is responsible for creating a new
TaskStatus (i.e., with TASK_KILLED) and invoking ExecutorDriver's
sendStatusUpdate.
"""
def frameworkMessage(self, driver, message):
"""
Invoked when a framework message has arrived for this executor. These
messages are best effort; do not expect a framework message to be
retransmitted in any reliable fashion.
"""
def shutdown(self, driver):
"""
Invoked when the executor should terminate all of its currently
running tasks. Note that after Mesos has determined that an executor
has terminated, a TASK_LOST status update will be created for any
tasks for which the executor did not send terminal status updates
(e.g., TASK_KILLED, TASK_FINISHED, TASK_FAILED, etc.).
"""
def error(self, driver, message):
"""
Invoked when a fatal error has occurred with the executor and/or
executor driver. The driver will be aborted BEFORE invoking this
callback.
"""
print("Error from Mesos: %s" % message, file=sys.stderr)
class ExecutorDriver(object):
"""
Interface for Mesos executor drivers. Users may wish to extend this
class in mock objects for tests.
"""
def start(self):
"""
Starts the executor driver. This needs to be called before any other
driver calls are made.
"""
def stop(self):
"""
Stops the executor driver.
"""
def abort(self):
"""
Aborts the driver so that no more callbacks can be made to the
executor. The semantics of abort and stop have deliberately been
separated so that code can detect an aborted driver (i.e., via the
return status of ExecutorDriver.join), and instantiate and start
another driver if desired (from within the same process, although this
functionality is currently not supported for executors).
"""
def join(self):
"""
Waits for the driver to be stopped or aborted, possibly blocking the
current thread indefinitely. The return status of this function can
be used to determine if the driver was aborted (see mesos.proto for a
description of Status).
"""
def run(self):
"""
Starts and immediately joins (i.e., blocks on) the driver.
"""
def sendStatusUpdate(self, status):
"""
Sends a status update to the framework scheduler, retrying as
necessary until an acknowledgement has been received or the executor
is terminated (in which case, a TASK_LOST status update will be sent).
See Scheduler.statusUpdate for more information about status update
acknowledgements.
"""
def sendFrameworkMessage(self, data):
"""
Sends a message to the framework scheduler. These messages are best
effort; do not expect a framework message to be retransmitted in any
reliable fashion.
"""
|
tayfun/django
|
refs/heads/master
|
tests/null_fk_ordering/tests.py
|
381
|
from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
Article.objects.create(title='No author on this article')
Article.objects.create(author=author_1, title='This article written by Tom Jones')
Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertEqual(len(list(Article.objects.all())), 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
Comment.objects.create(post=p, comment_text='My first comment')
Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
Comment.objects.create(comment_text='Another first comment')
Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertEqual(len(list(Comment.objects.all())), 4)
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/testData/completion/dunderNext.py
|
79
|
class C:
def __nex<caret>
|
onecloud/neutron
|
refs/heads/master
|
neutron/tests/unit/brocade/test_brocade_plugin.py
|
38
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.plugins.brocade import NeutronPlugin as brocade_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin as test_plugin
PLUGIN_NAME = ('neutron.plugins.brocade.'
'NeutronPlugin.BrocadePluginV2')
NOS_DRIVER = ('neutron.plugins.brocade.'
'nos.fake_nosdriver.NOSdriver')
FAKE_IPADDRESS = '2.2.2.2'
FAKE_USERNAME = 'user'
FAKE_PASSWORD = 'password'
FAKE_PHYSICAL_INTERFACE = 'em1'
class BrocadePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = PLUGIN_NAME
def setUp(self):
def mocked_brocade_init(self):
self._switch = {'address': FAKE_IPADDRESS,
'username': FAKE_USERNAME,
'password': FAKE_PASSWORD
}
self._driver = importutils.import_object(NOS_DRIVER)
with mock.patch.object(brocade_plugin.BrocadePluginV2,
'brocade_init', new=mocked_brocade_init):
super(BrocadePluginV2TestCase, self).setUp(self._plugin_name)
class TestBrocadeBasicGet(test_plugin.TestBasicGet,
BrocadePluginV2TestCase):
pass
class TestBrocadeV2HTTPResponse(test_plugin.TestV2HTTPResponse,
BrocadePluginV2TestCase):
pass
class TestBrocadePortsV2(test_plugin.TestPortsV2,
BrocadePluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
HAS_PORT_FILTER = True
class TestBrocadeNetworksV2(test_plugin.TestNetworksV2,
BrocadePluginV2TestCase):
pass
|
andresgz/django
|
refs/heads/master
|
tests/fixtures_regress/tests.py
|
83
|
# -*- coding: utf-8 -*-
# Unittests for fixtures.
from __future__ import unicode_literals
import json
import os
import re
import unittest
import warnings
import django
from django.core import management, serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import CommandError
from django.core.serializers.base import DeserializationError
from django.db import IntegrityError, transaction
from django.db.models import signals
from django.test import (
TestCase, TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.utils import six
from django.utils._os import upath
from django.utils.six import PY3, StringIO
from .models import (
Absolute, Animal, Article, Book, Child, Circle1, Circle2, Circle3,
ExternalDependency, M2MCircular1ThroughAB, M2MCircular1ThroughBC,
M2MCircular1ThroughCA, M2MCircular2ThroughAB, M2MComplexA, M2MComplexB,
M2MComplexCircular1A, M2MComplexCircular1B, M2MComplexCircular1C,
M2MComplexCircular2A, M2MComplexCircular2B, M2MSimpleA, M2MSimpleB,
M2MSimpleCircularA, M2MSimpleCircularB, M2MThroughAB, NKChild, Parent,
Person, RefToNKChild, Store, Stuff, Thingy, Widget,
)
_cur_dir = os.path.dirname(os.path.abspath(upath(__file__)))
def is_ascii(s):
return all(ord(c) < 128 for c in s)
skipIfNonASCIIPath = unittest.skipIf(
not is_ascii(django.__file__) and six.PY2,
'Python 2 crashes when checking non-ASCII exception messages.'
)
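# Hedged illustration, not used by the tests below: the JSON fixtures they load
# follow Django's standard serialized form, roughly as sketched here. The model
# label and field names are illustrative assumptions only.
_EXAMPLE_FIXTURE_JSON = """[
  {"model": "fixtures_regress.absolute", "pk": 1, "fields": {"name": "example"}}
]"""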
class TestFixtures(TestCase):
def animal_pre_save_check(self, signal, sender, instance, **kwargs):
self.pre_save_checks.append(
(
'Count = %s (%s)' % (instance.count, type(instance.count)),
'Weight = %s (%s)' % (instance.weight, type(instance.weight)),
)
)
def test_duplicate_pk(self):
"""
This is a regression test for ticket #3790.
"""
# Load a fixture that uses PK=1
management.call_command(
'loaddata',
'sequence',
verbosity=0,
)
# Create a new animal. Without a sequence reset, this new object
# will take a PK of 1 (on Postgres), and the save will fail.
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2,
)
animal.save()
self.assertGreater(animal.id, 1)
def test_loaddata_not_found_fields_not_ignore(self):
"""
Test for ticket #9279 -- Error is raised for entries in
the serialized data for fields that have been removed
from the database when not ignored.
"""
with self.assertRaises(DeserializationError):
management.call_command(
'loaddata',
'sequence_extra',
verbosity=0,
)
def test_loaddata_not_found_fields_ignore(self):
"""
Test for ticket #9279 -- Ignores entries in
the serialized data for fields that have been removed
from the database.
"""
management.call_command(
'loaddata',
'sequence_extra',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Lion')
def test_loaddata_not_found_fields_ignore_xml(self):
"""
Test for ticket #19998 -- Ignore entries in the XML serialized data
for fields that have been removed from the model definition.
"""
management.call_command(
'loaddata',
'sequence_extra_xml',
ignore=True,
verbosity=0,
)
self.assertEqual(Animal.specimens.all()[0].name, 'Wolf')
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, None)
self.assertEqual(Stuff.objects.all()[0].owner, None)
@skipUnlessDBFeature('interprets_empty_strings_as_nulls')
def test_pretty_print_xml_empty_strings(self):
"""
Regression test for ticket #4558 -- pretty printing of XML fixtures
doesn't affect parsing of None values.
"""
# Load a pretty-printed XML fixture with Nulls.
management.call_command(
'loaddata',
'pretty.xml',
verbosity=0,
)
self.assertEqual(Stuff.objects.all()[0].name, '')
self.assertEqual(Stuff.objects.all()[0].owner, None)
def test_absolute_path(self):
"""
Regression test for ticket #6436 --
os.path.join will throw away the initial parts of a path if it
encounters an absolute path.
This means that if a fixture is specified as an absolute path,
we need to make sure we don't discover the absolute path in every
fixture directory.
"""
load_absolute_path = os.path.join(
os.path.dirname(upath(__file__)),
'fixtures',
'absolute.json'
)
management.call_command(
'loaddata',
load_absolute_path,
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_relative_path(self, path=['fixtures', 'absolute.json']):
relative_path = os.path.join(*path)
cwd = os.getcwd()
try:
os.chdir(_cur_dir)
management.call_command(
'loaddata',
relative_path,
verbosity=0,
)
finally:
os.chdir(cwd)
self.assertEqual(Absolute.objects.count(), 1)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1')])
def test_relative_path_in_fixture_dirs(self):
self.test_relative_path(path=['inner', 'absolute.json'])
def test_path_containing_dots(self):
management.call_command(
'loaddata',
'path.containing.dots.json',
verbosity=0,
)
self.assertEqual(Absolute.objects.count(), 1)
def test_unknown_format(self):
"""
Test for ticket #4371 -- Loading data of an unknown format should fail
Validate that error conditions are caught correctly
"""
with six.assertRaisesRegex(self, management.CommandError,
"Problem installing fixture 'bad_fixture1': "
"unkn is not a known serialization format."):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
@skipIfNonASCIIPath
@override_settings(SERIALIZATION_MODULES={'unkn': 'unexistent.path'})
def test_unimportable_serializer(self):
"""
Test that failing serializer import raises the proper error
"""
with six.assertRaisesRegex(self, ImportError,
r"No module named.*unexistent"):
management.call_command(
'loaddata',
'bad_fixture1.unkn',
verbosity=0,
)
def test_invalid_data(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
using explicit filename.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2.xml',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(
str(warning.message),
"No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
)
def test_invalid_data_no_ext(self):
"""
Test for ticket #4371 -- Loading a fixture file with invalid data
without file extension.
Test for ticket #18213 -- warning conditions are caught correctly
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(
str(warning.message),
"No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
)
def test_empty(self):
"""
Test for ticket #18213 -- Loading a fixture file with no data outputs a warning.
Previously, an empty fixture raised an error; see ticket #4371.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'empty',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(str(warning.message),
"No fixture data found for 'empty'. (File format may be invalid.)")
def test_error_message(self):
"""
Regression for #9011 - error message is correct.
Change from error to warning for ticket #18213.
"""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always")
management.call_command(
'loaddata',
'bad_fixture2',
'animal',
verbosity=0,
)
warning = warning_list.pop()
self.assertEqual(warning.category, RuntimeWarning)
self.assertEqual(
str(warning.message),
"No fixture data found for 'bad_fixture2'. (File format may be invalid.)"
)
def test_pg_sequence_resetting_checks(self):
"""
Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
ascend to parent models when inheritance is used
(since they are treated individually).
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
self.assertEqual(Parent.objects.all()[0].id, 1)
self.assertEqual(Child.objects.all()[0].id, 1)
def test_close_connection_after_loaddata(self):
"""
Test for ticket #7572 -- MySQL has a problem if the same connection is
used to create tables, load data, and then query over that data.
To compensate, we close the connection after running loaddata.
This ensures that a new connection is opened when test queries are
issued.
"""
management.call_command(
'loaddata',
'big-fixture.json',
verbosity=0,
)
articles = Article.objects.exclude(id=9)
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
# Just for good measure, run the same query again.
# Under the influence of ticket #7572, this will
# give a different result to the previous call.
self.assertEqual(
list(articles.values_list('id', flat=True)),
[1, 2, 3, 4, 5, 6, 7, 8]
)
def test_field_value_coerce(self):
"""
Test for tickets #8298, #9942 - Field values should be coerced into the
correct type by the deserializer, not as part of the database write.
"""
self.pre_save_checks = []
signals.pre_save.connect(self.animal_pre_save_check)
try:
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
self.assertEqual(
self.pre_save_checks,
[
("Count = 42 (<%s 'int'>)" % ('class' if PY3 else 'type'),
"Weight = 1.2 (<%s 'float'>)" % ('class' if PY3 else 'type'))
]
)
finally:
signals.pre_save.disconnect(self.animal_pre_save_check)
def test_dumpdata_uses_default_manager(self):
"""
Regression for #11286
Ensure that dumpdata honors the default manager
Dump the current contents of the database as a JSON fixture
"""
management.call_command(
'loaddata',
'animal.xml',
verbosity=0,
)
management.call_command(
'loaddata',
'sequence.json',
verbosity=0,
)
animal = Animal(
name='Platypus',
latin_name='Ornithorhynchus anatinus',
count=2,
weight=2.2,
)
animal.save()
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.animal',
format='json',
stdout=out,
)
# Output order isn't guaranteed, so check for parts
data = out.getvalue()
# Get rid of artifacts like '000000002' to eliminate the differences
# between different Python versions.
data = re.sub('0{6,}[0-9]', '', data)
animals_data = sorted([
{
"pk": 1, "model": "fixtures_regress.animal",
"fields": {"count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo"}
},
{
"pk": 10, "model": "fixtures_regress.animal",
"fields": {"count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae"}
},
{
"pk": animal.pk, "model": "fixtures_regress.animal",
"fields": {"count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus"}
},
], key=lambda x: x["pk"])
data = sorted(json.loads(data), key=lambda x: x["pk"])
self.maxDiff = 1024
self.assertEqual(data, animals_data)
def test_proxy_model_included(self):
"""
Regression for #11428 - Proxy models aren't included when you dumpdata
"""
out = StringIO()
# Create an instance of the concrete class
widget = Widget.objects.create(name='grommet')
management.call_command(
'dumpdata',
'fixtures_regress.widget',
'fixtures_regress.widgetproxy',
format='json',
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""[{"pk": %d, "model": "fixtures_regress.widget", "fields": {"name": "grommet"}}]"""
% widget.pk
)
@skipUnlessDBFeature('supports_forward_references')
def test_loaddata_works_when_fixture_has_forward_refs(self):
"""
Regression for #3615 - Forward references cause fixtures not to load in MySQL (InnoDB)
"""
management.call_command(
'loaddata',
'forward_ref.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
"""
Regression for #3615 - Ensure that data with nonexistent child key references raises an error
"""
with six.assertRaisesRegex(self, IntegrityError,
"Problem installing fixture"):
management.call_command(
'loaddata',
'forward_ref_bad_data.json',
verbosity=0,
)
@skipUnlessDBFeature('supports_forward_references')
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_2')])
def test_loaddata_forward_refs_split_fixtures(self):
"""
Regression for #17530 - should be able to cope with forward references
when the fixtures are not in the same files or directories.
"""
management.call_command(
'loaddata',
'forward_ref_1.json',
'forward_ref_2.json',
verbosity=0,
)
self.assertEqual(Book.objects.all()[0].id, 1)
self.assertEqual(Person.objects.all()[0].id, 4)
def test_loaddata_no_fixture_specified(self):
"""
Regression for #7043 - An error is quickly reported when no fixture is provided on the command line.
"""
with six.assertRaisesRegex(self, management.CommandError,
"No database fixture specified. Please provide the path of "
"at least one fixture in the command line."):
management.call_command(
'loaddata',
verbosity=0,
)
def test_ticket_20820(self):
"""
Regression for ticket #20820 -- loaddata on a model that inherits
from a model with a M2M shouldn't blow up.
"""
management.call_command(
'loaddata',
'special-article.json',
verbosity=0,
)
def test_ticket_22421(self):
"""
Regression for ticket #22421 -- loaddata on a model that inherits from
a grand-parent model with a M2M but via an abstract parent shouldn't
blow up.
"""
management.call_command(
'loaddata',
'feature.json',
verbosity=0,
)
def test_loaddata_with_m2m_to_self(self):
"""
Regression test for ticket #17946.
"""
management.call_command(
'loaddata',
'm2mtoself.json',
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_1')])
def test_fixture_dirs_with_duplicates(self):
"""
settings.FIXTURE_DIRS cannot contain duplicates in order to avoid
repeated fixture loading.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
"settings.FIXTURE_DIRS contains duplicates.",
management.call_command,
'loaddata',
'absolute.json',
verbosity=0,
)
@skipIfNonASCIIPath
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures')])
def test_fixture_dirs_with_default_fixture_path(self):
"""
settings.FIXTURE_DIRS cannot contain a default fixtures directory
for application (app/fixtures) in order to avoid repeated fixture loading.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS."
% (os.path.join(_cur_dir, 'fixtures'), 'fixtures_regress'),
management.call_command,
'loaddata',
'absolute.json',
verbosity=0,
)
@override_settings(FIXTURE_DIRS=[os.path.join(_cur_dir, 'fixtures_1'),
os.path.join(_cur_dir, 'fixtures_2')])
def test_loaddata_with_valid_fixture_dirs(self):
management.call_command(
'loaddata',
'absolute.json',
verbosity=0,
)
class NaturalKeyFixtureTests(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 - Python based parser version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=1).data,
'apple'
)
self.assertEqual(
RefToNKChild.objects.get(pk=1).nk_fk.data,
'apple'
)
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 - XML version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
)
management.call_command(
'loaddata',
'nk-inheritance2.xml',
verbosity=0,
)
self.assertEqual(
NKChild.objects.get(pk=2).data,
'banana'
)
self.assertEqual(
RefToNKChild.objects.get(pk=2).nk_fk.data,
'apple'
)
def test_nk_on_serialize(self):
"""
Check that natural key requirements are taken into account
when serializing models
"""
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.book',
'fixtures_regress.person',
'fixtures_regress.store',
verbosity=0,
format='json',
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
stdout=out,
)
self.assertJSONEqual(
out.getvalue(),
"""
[{"fields": {"main": null, "name": "Amazon"}, "model": "fixtures_regress.store"},
{"fields": {"main": null, "name": "Borders"}, "model": "fixtures_regress.store"},
{"fields": {"name": "Neal Stephenson"}, "model": "fixtures_regress.person"},
{"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]],
"name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]
"""
)
def test_dependency_sorting(self):
"""
Now let's check the dependency sorting explicitly.
It doesn't matter in what order you mention the models:
Store *must* be serialized before Person, and both
must be serialized before Book.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Book, Person, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_2(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Book, Store, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_3(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Store, Book, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_4(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Store, Person, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_5(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Book, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_6(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_dangling(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, Circle1, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Circle1, Store, Person, Book]
)
def test_dependency_sorting_tight_circular(self):
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
serializers.sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Store, Book])],
)
def test_dependency_sorting_tight_circular_2(self):
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2 in serialized app list.",
serializers.sort_dependencies,
[('fixtures_regress', [Circle1, Book, Circle2])],
)
def test_dependency_self_referential(self):
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle3 in "
"serialized app list.",
serializers.sort_dependencies,
[('fixtures_regress', [Book, Circle3])],
)
def test_dependency_sorting_long(self):
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.Circle1, "
"fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized "
"app list.",
serializers.sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Circle3, Store, Book])],
)
def test_dependency_sorting_normal(self):
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [Person, ExternalDependency, Book])]
)
self.assertEqual(
sorted_deps,
[Person, Book, ExternalDependency]
)
def test_normal_pk(self):
"""
Check that normal primary keys still work
on a model with natural key capabilities
"""
management.call_command(
'loaddata',
'non_natural_1.json',
verbosity=0,
)
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
)
management.call_command(
'loaddata',
'non_natural_2.xml',
verbosity=0,
)
books = Book.objects.all()
self.assertEqual(
books.__repr__(),
"[<Book: Cryptonomicon by Neal Stephenson (available at Amazon, Borders)>, "
"<Book: Ender's Game by Orson Scott Card (available at Collins Bookstore)>, "
"<Book: Permutation City by Greg Egan (available at Angus and Robertson)>]"
)
class M2MNaturalKeyFixtureTests(TestCase):
"""Tests for ticket #14426."""
def test_dependency_sorting_m2m_simple(self):
"""
M2M relations without explicit through models SHOULD count as dependencies
Regression test for bugs that could be caused by flawed fixes to
#14226, namely if M2M checks are removed from sort_dependencies
altogether.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [M2MSimpleA, M2MSimpleB])]
)
self.assertEqual(sorted_deps, [M2MSimpleB, M2MSimpleA])
def test_dependency_sorting_m2m_simple_circular(self):
"""
Resolving circular M2M relations without explicit through models should
fail loudly
"""
self.assertRaisesMessage(
RuntimeError,
"Can't resolve dependencies for fixtures_regress.M2MSimpleCircularA, "
"fixtures_regress.M2MSimpleCircularB in serialized app list.",
serializers.sort_dependencies,
[('fixtures_regress', [M2MSimpleCircularA, M2MSimpleCircularB])]
)
def test_dependency_sorting_m2m_complex(self):
"""
M2M relations with explicit through models should NOT count as
dependencies. The through model itself will have dependencies, though.
"""
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [M2MComplexA, M2MComplexB, M2MThroughAB])]
)
# Order between M2MComplexA and M2MComplexB doesn't matter. The through
# model has dependencies to them though, so it should come last.
self.assertEqual(sorted_deps[-1], M2MThroughAB)
def test_dependency_sorting_m2m_complex_circular_1(self):
"""
Circular M2M relations with explicit through models should be serializable
"""
A, B, C, AtoB, BtoC, CtoA = (M2MComplexCircular1A, M2MComplexCircular1B,
M2MComplexCircular1C, M2MCircular1ThroughAB,
M2MCircular1ThroughBC, M2MCircular1ThroughCA)
try:
sorted_deps = serializers.sort_dependencies(
[('fixtures_regress', [A, B, C, AtoB, BtoC, CtoA])]
)
except CommandError:
self.fail("Serialization dependency solving algorithm isn't "
"capable of handling circular M2M setups with "
"intermediate models.")
# The dependency sorting should not result in an error, and the
# through model should have dependencies to the other models and as
# such come last in the list.
self.assertEqual(sorted_deps[:3], [A, B, C])
self.assertEqual(sorted_deps[3:], [AtoB, BtoC, CtoA])
def test_dependency_sorting_m2m_complex_circular_2(self):
"""
Circular M2M relations with explicit through models should be serializable
This test tests the circularity with explicit natural_key.dependencies
"""
try:
sorted_deps = serializers.sort_dependencies([
('fixtures_regress', [
M2MComplexCircular2A,
M2MComplexCircular2B,
M2MCircular2ThroughAB])
])
except CommandError:
self.fail("Serialization dependency solving algorithm isn't "
"capable of handling circular M2M setups with "
"intermediate models plus natural key dependency hints.")
self.assertEqual(sorted_deps[:2], [M2MComplexCircular2A, M2MComplexCircular2B])
self.assertEqual(sorted_deps[2:], [M2MCircular2ThroughAB])
def test_dump_and_load_m2m_simple(self):
"""
Test serializing and deserializing back models with simple M2M relations
"""
a = M2MSimpleA.objects.create(data="a")
b1 = M2MSimpleB.objects.create(data="b1")
b2 = M2MSimpleB.objects.create(data="b2")
a.b_set.add(b1)
a.b_set.add(b2)
out = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.M2MSimpleA',
'fixtures_regress.M2MSimpleB',
use_natural_foreign_keys=True,
stdout=out,
)
for model in [M2MSimpleA, M2MSimpleB]:
model.objects.all().delete()
objects = serializers.deserialize("json", out.getvalue())
for obj in objects:
obj.save()
new_a = M2MSimpleA.objects.get_by_natural_key("a")
self.assertQuerysetEqual(new_a.b_set.all(), [
"<M2MSimpleB: b1>",
"<M2MSimpleB: b2>"
], ordered=False)
class TestTicket11101(TransactionTestCase):
available_apps = [
'fixtures_regress',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@skipUnlessDBFeature('supports_transactions')
def test_ticket_11101(self):
"""Test that fixtures can be rolled back (ticket #11101)."""
with transaction.atomic():
management.call_command(
'loaddata',
'thingy.json',
verbosity=0,
)
self.assertEqual(Thingy.objects.count(), 1)
transaction.set_rollback(True)
self.assertEqual(Thingy.objects.count(), 0)
class TestLoadFixtureFromOtherAppDirectory(TestCase):
"""
#23612 -- fixtures path should be normalized to allow referencing relative
paths on Windows.
"""
current_dir = os.path.abspath(os.path.dirname(__file__))
# relative_prefix is something like tests/fixtures_regress or
# fixtures_regress depending on how runtests.py is invoked.
# All path separators must be / in order to be a proper regression test on
# Windows, so replace as appropriate.
relative_prefix = os.path.relpath(current_dir, os.getcwd()).replace('\\', '/')
fixtures = [relative_prefix + '/fixtures/absolute.json']
def test_fixtures_loaded(self):
count = Absolute.objects.count()
self.assertGreater(count, 0, "Fixtures not loaded properly.")
|
nicholas-silveira/art_pipeline
|
refs/heads/master
|
maya/packages/oop_maya/tools/animation/ui/__init__.py
|
12133432
| |
jimi-c/ansible
|
refs/heads/devel
|
test/units/modules/network/dellos9/__init__.py
|
12133432
| |
adamkh/kivy
|
refs/heads/master
|
kivy/tools/extensions/__init__.py
|
12133432
| |
fxa90id/mozillians
|
refs/heads/master
|
lib/jinjautils.py
|
12
|
# TODO: let's see if we can get rid of this, it's garbage
from django.contrib.admin import options, actions, sites
from django.template import loader
import jingo
def django_to_jinja(template_name, context, **kw):
"""
We monkeypatch Django admin's render_to_response to work in our Jinja
environment. We have an admin/base_site.html template that Django's
templates inherit, but instead of rendering html, it renders the Django
pieces into a Jinja template. We get all of Django's html, but wrapped in
our normal site structure.
"""
context_instance = kw.pop('context_instance')
source = loader.render_to_string(template_name, context, context_instance)
request = context_instance['request']
return jingo.render(request, jingo.env.from_string(source))
actions.render_to_response = django_to_jinja
options.render_to_response = django_to_jinja
sites.render_to_response = django_to_jinja
def jinja_for_django(template_name, context=None, **kw):
"""
If you want to use some built in logic (or a contrib app) but need to
override the templates to work with Jinja, replace the object's
render_to_response function with this one. That will render a Jinja
template through Django's functions. An example can be found in the users
app.
"""
if context is None:
context = {}
context_instance = kw.pop('context_instance')
request = context_instance['request']
for d in context_instance.dicts:
context.update(d)
return jingo.render(request, template_name, context, **kw)
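# Hedged usage sketch, not part of the original module: a helper like this
# could point any other Django view module at the Jinja-aware renderer above.
# The function name is an assumption and nothing is patched at import time.
def patch_render_to_response(view_module):
    """Replace view_module.render_to_response with jinja_for_django."""
    view_module.render_to_response = jinja_for_django
    return view_module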
|
chouseknecht/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/netapp/netapp_e_amg.py
|
21
|
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_amg
short_description: NetApp E-Series create, remove, and update asynchronous mirror groups
description:
- Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name of the async array you wish to target, or create.
- If C(state) is present and the name isn't found, it will attempt to create.
required: yes
secondaryArrayId:
description:
- The ID of the secondary array to be used in mirroring process
required: yes
syncIntervalMinutes:
description:
- The synchronization interval in minutes
default: 10
manualSync:
description:
- Setting this to true will cause other synchronization values to be ignored
type: bool
default: 'no'
recoveryWarnThresholdMinutes:
description:
- Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
default: 20
repoUtilizationWarnThreshold:
description:
- Repository utilization warning threshold (percent)
default: 80
interfaceType:
description:
- The intended protocol to use if both Fibre and iSCSI are available.
choices:
- iscsi
- fibre
syncWarnThresholdMinutes:
description:
- The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
default: 10
state:
description:
- A C(state) of present will either create or update the async mirror group.
- A C(state) of absent will remove the async mirror group.
choices: [ absent, present ]
required: yes
"""
EXAMPLES = """
- name: AMG removal
na_eseries_amg:
state: absent
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
- name: AMG create
netapp_e_amg:
state: present
ssid: "{{ ssid }}"
secondaryArrayId: "{{amg_secondaryArrayId}}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
new_name: "{{amg_array_name}}"
name: "{{amg_name}}"
when: amg_create
"""
RETURN = """
msg:
description: Successful creation
returned: success
type: str
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
""" # NOQA
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.netapp import request, eseries_host_argument_spec
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
label_exists = False
matches_spec = False
current_state = None
async_id = None
api_data = None
desired_name = body.get('name')
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
try:
rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
except Exception as e:
module.exit_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())
for async_group in data:
if async_group['label'] == desired_name:
label_exists = True
api_data = async_group
async_id = async_group['groupRef']
current_state = dict(
syncIntervalMinutes=async_group['syncIntervalMinutes'],
syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
)
if current_state == desired_state:
matches_spec = True
return label_exists, matches_spec, api_data, async_id
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
endpoint = 'storage-systems/%s/async-mirrors' % ssid
url = api_url + endpoint
post_data = json.dumps(body)
try:
rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while creating async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
desired_state = dict((x, (body.get(x))) for x in compare_keys)
if new_name:
desired_state['new_name'] = new_name
post_data = json.dumps(desired_state)
try:
rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
url_username=user, url_password=pwd)
except Exception as e:
module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return data
def remove_amg(module, ssid, api_url, pwd, user, async_id):
endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
url = api_url + endpoint
try:
rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
headers=HEADERS)
except Exception as e:
module.exit_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
exception=traceback.format_exc())
return
def main():
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(required=True, type='str'),
new_name=dict(required=False, type='str'),
secondaryArrayId=dict(required=True, type='str'),
syncIntervalMinutes=dict(required=False, default=10, type='int'),
manualSync=dict(required=False, default=False, type='bool'),
recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
state=dict(required=True, choices=['present', 'absent']),
syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
))
module = AnsibleModule(argument_spec=argument_spec)
p = module.params
ssid = p.pop('ssid')
api_url = p.pop('api_url')
user = p.pop('api_username')
pwd = p.pop('api_password')
new_name = p.pop('new_name')
state = p.pop('state')
if not api_url.endswith('/'):
api_url += '/'
name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)
if state == 'present':
if name_exists and spec_matches:
module.exit_json(changed=False, msg="Desired state met", **api_data)
elif name_exists and not spec_matches:
results = update_async(module, ssid, api_url, pwd, user,
p, new_name, async_id)
module.exit_json(changed=True,
msg="Async mirror group updated", async_id=async_id,
**results)
elif not name_exists:
results = create_async(module, ssid, api_url, pwd, user, p)
module.exit_json(changed=True, **results)
elif state == 'absent':
if name_exists:
remove_amg(module, ssid, api_url, pwd, user, async_id)
module.exit_json(changed=True, msg="Async mirror group removed.",
async_id=async_id)
else:
module.exit_json(changed=False,
msg="Async Mirror group: %s already absent" % p['name'])
if __name__ == '__main__':
main()
|
paolodedios/tensorflow
|
refs/heads/master
|
tensorflow/python/distribute/custom_training_loop_input_test.py
|
5
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.tpu import tpu
from tensorflow.python.util import nest
def get_dataset_from_tensor_slices(inp_array):
dataset = dataset_ops.DatasetV2.from_tensor_slices(inp_array)
# TODO(b/138326910): Remove Dataset V1 version once bug resolved.
if not tf2.enabled():
dataset = dataset_ops.Dataset.from_tensor_slices(inp_array)
return dataset
class AssertFlattenedMixin(object):
"""Mixin for specialized asserts."""
def assert_equal_flattened(self, expected_results, actual_results):
"""Asserts that flattened results are equal.
Due to the number of replicas in the strategy, the output may have a
different structure and needs to be flattened for comparison.
Args:
expected_results: The results expected as a result of a computation.
actual_results: The actual results of a computation.
"""
self.assertEqual(len(expected_results), len(actual_results))
for i, expected_result in enumerate(expected_results):
final_result = []
actual_result = actual_results[i]
for val in actual_result:
final_result.extend(val.numpy())
self.assertAllEqual(expected_result, final_result)
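# Illustrative note (not part of the original test): with two replicas and a
# global batch of 2, one step typically yields a per-replica tuple such as
# (array([25.]), array([36.])), which assert_equal_flattened collapses to
# [25., 36.] before comparing it against the expected batch.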
class InputIterationTest(test.TestCase, parameterized.TestCase,
AssertFlattenedMixin):
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testConstantNumpyInput(self, distribution):
@def_function.function
def run(x):
def computation(x):
return math_ops.square(x)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(x,)))
return outputs
self.assertAllEqual(
constant_op.constant(4., shape=(distribution.num_replicas_in_sync)),
run(2.))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testStatefulExperimentalRunAlwaysExecute(self, distribution):
with distribution.scope():
v = variables.Variable(
0.0, aggregation=variables.VariableAggregation.MEAN)
@def_function.function
def train_step():
def assign_add():
v.assign_add(1.0)
distribution.run(assign_add)
return array_ops.zeros([])
train_step()
self.assertAllEqual(1.0, v.numpy())
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.strategies_minus_tpu,
mode=["eager"]))
def testFullEager(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = distribution.experimental_local_results(
distribution.run(train_step, args=(x,)))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies, mode=["eager"]))
def testGetNextAsOptional(self, distribution):
data = [5., 6., 7., 8.]
dataset = get_dataset_from_tensor_slices(data).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
def train_step(data):
return math_ops.square(data)
@def_function.function
def run(iterator):
return distribution.experimental_local_results(
distribution.run(
train_step, args=(iterator.get_next_as_optional().get_value(),)))
self.assert_equal_flattened([[25., 36.]], [run(iterator)])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies, mode=["eager"]))
def testGetNextAsOptionalExampleUsage(self, distribution):
global_batch_size = 2
steps_per_loop = 6
dataset = dataset_ops.Dataset.range(
8, output_type=dtypes.int32).batch(global_batch_size)
distributed_iterator = iter(
distribution.experimental_distribute_dataset(dataset))
@def_function.function
def train_fn(distributed_iterator):
def step_fn(x):
return x
for _ in math_ops.range(steps_per_loop):
optional_data = distributed_iterator.get_next_as_optional()
if not optional_data.has_value():
break
distribution.run(step_fn, args=(optional_data.get_value(),))
train_fn(distributed_iterator)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.tpu_strategies, mode=["eager"]))
def testFullEagerTPU(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
with self.assertRaisesRegex(NotImplementedError,
"does not support pure eager execution"):
distribution.run(train_step, args=(next(input_iterator),))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testStepInFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
@def_function.function
def train_step(data):
return math_ops.square(data)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = distribution.experimental_local_results(
distribution.run(train_step, args=(x,)))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testRunInFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.run(train_step, args=(input_data,)))
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = f_train_step(x)
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["eager"]))
def testNestedOutput(self, distribution):
dataset = get_dataset_from_tensor_slices([0, 1, 2, 3]).batch(2)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return [{
"a": x - 1,
"b": x + 1
}]
inputs = next(iterator)
outputs = distribution.run(computation, args=(inputs,))
return nest.map_structure(distribution.experimental_local_results,
outputs)
results = run(input_iterator)
for replica in range(distribution.num_replicas_in_sync):
# The input dataset is range(4), so the replica id is the same as the input.
self.assertAllEqual(results[0]["a"][replica], [replica - 1])
self.assertAllEqual(results[0]["b"][replica], [replica + 1])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testRunInFunctionAutoGraphApplication(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.run(train_step, args=(input_data,)))
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = []
for x in dist_dataset:
output = f_train_step(x)
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetIterationInFunction(self, distribution):
with distribution.scope():
a = variables.Variable(
1.0, aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
def train_step(_):
a.assign_add(1.0)
@def_function.function
def f_train_step(dist_dataset):
number_of_steps = constant_op.constant(0.0)
product_of_means = constant_op.constant(2.0)
for x in dist_dataset: # loop with values modified each iteration
number_of_steps += 1
product_of_means *= math_ops.cast(
distribution.reduce("MEAN", x, axis=0), product_of_means.dtype)
for y in dist_dataset: # loop with no intermediate state
distribution.run(train_step, args=(y,))
return number_of_steps, product_of_means
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
number_of_steps, product_of_means = f_train_step(dist_dataset)
self.assertEqual(2, number_of_steps.numpy())
self.assertNear((2 * (5+6)/2 * (7+8)/2), product_of_means.numpy(), 1e-3)
# We set the initial value of `a` to 1 and iterate through the dataset 2
# times (4/2, where 4 is the number of dataset elements and 2 is the batch
# size). Hence the final result is 3.
self.assertEqual(3.0, (a.numpy()))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetAssertWithDynamicBatch(self, distribution):
# Regression test for github issue 33517.
def step_fn(data):
assert_op = control_flow_ops.Assert(math_ops.less_equal(
math_ops.reduce_max(data), 100.), [data])
with ops.control_dependencies([assert_op]):
return math_ops.square(data)
@def_function.function
def train(dataset):
results = []
iterator = iter(dataset)
# we iterate through the loop 2 times since we have 3 elements and a
# global batch of 2, so the second batch is a partial batch of size 1.
for _ in range(2):
elem = next(iterator)
output = distribution.experimental_local_results(
distribution.run(step_fn, args=(elem,)))
results.append(output)
return results
dataset = dataset_ops.DatasetV2.from_tensor_slices([5., 6., 7.,]).batch(2)
# TODO(b/138326910): Remove Dataset V1 version once bug resolved.
if not tf2.enabled():
dataset = dataset_ops.Dataset.from_tensor_slices([5., 6., 7.,]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = train(dist_dataset)
expected_results = [[25., 36.], [49.]]
self.assertEqual(len(expected_results), len(results))
# Need to expand results since output will be grouped differently depending
# on the number of replicas.
for i, expected_result in enumerate(expected_results):
final_result = []
actual_result = results[i]
for val in actual_result:
final_result.extend(val.numpy())
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDistributeDatasetIteratorWithoutFunction(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.distribute_datasets_from_function(
lambda _: get_dataset_from_tensor_slices(data)))
self.assertAllEqual(
distribution.experimental_local_results(input_iterator.get_next()),
data[0:distribution.num_replicas_in_sync])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDistributeDatasetIteratorWithFunction(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.distribute_datasets_from_function(
lambda _: get_dataset_from_tensor_slices(data)))
@def_function.function
def run(iterator):
return distribution.experimental_local_results(iterator.get_next())
local_results = run(input_iterator)
self.assertAllEqual(local_results,
data[0:distribution.num_replicas_in_sync])
backing_devices = [result.backing_device for result in local_results]
self.assertAllEqual(backing_devices, distribution.extended.worker_devices)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDistributeDatasetPrefetch(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.experimental_distribute_dataset(
get_dataset_from_tensor_slices(data).batch(2)))
local_results = distribution.experimental_local_results(
input_iterator.get_next())
backing_devices = [result.backing_device for result in local_results]
self.assertAllEqual(backing_devices, distribution.extended.worker_devices)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDistributeDatasetFunctionPrefetch(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.distribute_datasets_from_function(
lambda _: get_dataset_from_tensor_slices(data)))
local_results = distribution.experimental_local_results(
input_iterator.get_next())
backing_devices = [result.backing_device for result in local_results]
self.assertAllEqual(backing_devices, distribution.extended.worker_devices)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.tpu_strategies,
mode=["eager"]
))
def testDistributeDatasetHostPrefetch(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.experimental_distribute_dataset(
get_dataset_from_tensor_slices(data).batch(2),
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
local_results = distribution.experimental_local_results(
input_iterator.get_next())
for result in local_results:
self.assertEqual(result.backing_device,
device_util.resolve("/device:CPU:0"))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.tpu_strategies,
mode=["eager"]
))
def testDistributeDatasetFunctionHostPrefetch(self, distribution):
data = [5., 6., 7., 8.]
input_iterator = iter(
distribution.distribute_datasets_from_function(
lambda _: get_dataset_from_tensor_slices(data),
distribute_lib.InputOptions(experimental_fetch_to_device=False)))
local_results = distribution.experimental_local_results(
input_iterator.get_next())
for result in local_results:
self.assertEqual(result.backing_device,
device_util.resolve("/device:CPU:0"))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapes(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return math_ops.reduce_mean(x)
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.tpu_strategy, mode=["eager"]))
def testDynamicShapesWithRunOptionsBucketizing(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
options = distribute_lib.RunOptions(
experimental_bucketizing_dynamic_shape=True)
@def_function.function
def run(iterator):
def computation(x):
return math_ops.reduce_mean(x)
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.run(
computation, args=(inputs,), options=options))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.tpu_strategy, mode=["eager"]))
def testDynamicShapesWithRunOptionsDisableDynamicPadder(self, distribution):
dataset = get_dataset_from_tensor_slices([5, 6, 7]).batch(4)
mask_dataset = get_dataset_from_tensor_slices([1, 0, 1]).batch(4)
dataset = dataset_ops.DatasetV2.zip((dataset, mask_dataset))
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
options = distribute_lib.RunOptions(
experimental_xla_options=tpu.XLAOptions(
enable_xla_dynamic_padder=False))
@def_function.function
def run(iterator):
def computation(inputs):
x, mask = inputs
y = x * mask
return math_ops.reduce_sum(y)
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,), options=options))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5, 7], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testDynamicOutputsWithX64(self, distribution):
dataset = get_dataset_from_tensor_slices(
[5]).map(lambda x: math_ops.cast(x, dtypes.int64)).batch(2)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return math_ops.add(x, x)
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
result = run(input_iterator)
self.assertAllEqual([10], result[0])
self.assertAllEqual([], result[1])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapesWithGetNextOutsideFunction(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(inputs):
def computation(x):
return math_ops.reduce_mean(x)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testStrategyReduceWithDynamicShapes(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
inputs = next(iterator)
return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, axis=0)
self.assertAllEqual(6., run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testStrategyReduceWithDynamicShapesRank2(self, distribution):
dataset = get_dataset_from_tensor_slices(
[[1., 1.], [1., 1.], [1., 1.]]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
inputs = next(iterator)
return distribution.reduce(reduce_util.ReduceOp.MEAN, inputs, axis=0)
self.assertAllEqual([1., 1.], run(input_iterator))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]
))
def testDynamicShapesWithSizeOp(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(inputs):
def computation(x):
return array_ops.size_v2(x)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([2, 1], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testSegmentSumWithDynamicNumberOfSegments(self, distribution):
def dataset_fn(_):
data = array_ops.zeros(5, dtype=dtypes.int32)
dataset = get_dataset_from_tensor_slices(data)
dataset = dataset.batch(3)
return dataset
input_iterator = iter(
distribution.distribute_datasets_from_function(dataset_fn))
@def_function.function
def step_fn(example):
segment_ids = array_ops.zeros_like_v2(example)
num_segment = array_ops.shape(example)[0]
# If number of segments is dynamic, output should be a dynamic shape.
return math_ops.unsorted_segment_sum(example, segment_ids, num_segment)
# This assumes that there are exactly 2 replicas
outputs = distribution.experimental_local_results(
distribution.run(step_fn, args=(next(input_iterator),)))
self.assertAllEqual((3,), outputs[0].shape)
self.assertAllEqual((2,), outputs[1].shape)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testReshapeWithDynamicInputs(self, distribution):
def dataset_fn(_):
data = array_ops.zeros((5, 1, 2), dtype=dtypes.int32)
dataset = get_dataset_from_tensor_slices(data)
dataset = dataset.batch(3)
return dataset
input_iterator = iter(
distribution.distribute_datasets_from_function(dataset_fn))
@def_function.function
def step_fn(example):
# example: [<=3, 1, 2]
# tile: [<=3, <=3, 2]
tile = array_ops.tile(example, [1, array_ops.shape(example)[0], 1])
# reshape1: [<=(3*3 = 9), 2]
reshape1 = array_ops.reshape(tile, [-1, 2])
# reshape2: [<=3, <=3, 2]
reshape2 = array_ops.reshape(
reshape1,
[array_ops.shape(example)[0],
array_ops.shape(example)[0], 2])
# reshape3: [<=3, -1, 2]
reshape3 = array_ops.reshape(reshape1,
[array_ops.shape(example)[0], -1, 2])
# reshape4: [-1, <=3, 2]
reshape4 = array_ops.reshape(reshape1,
[-1, array_ops.shape(example)[0], 2])
# Reshape1 is duplicated in order to test dynamic dimension on copies.
return [reshape1, reshape2, reshape3, reshape4, reshape1]
# This assumes that there are exactly 2 replicas
outputs = distribution.experimental_local_results(
distribution.run(step_fn, args=(next(input_iterator),)))
self.assertAllEqual((9, 2), outputs[0][0].shape)
self.assertAllEqual((3, 3, 2), outputs[0][1].shape)
self.assertAllEqual((3, 3, 2), outputs[0][2].shape)
self.assertAllEqual((3, 3, 2), outputs[0][3].shape)
self.assertAllEqual((9, 2), outputs[0][4].shape)
self.assertAllEqual((4, 2), outputs[1][0].shape)
self.assertAllEqual((2, 2, 2), outputs[1][1].shape)
self.assertAllEqual((2, 2, 2), outputs[1][2].shape)
self.assertAllEqual((2, 2, 2), outputs[1][3].shape)
self.assertAllEqual((4, 2), outputs[1][4].shape)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testDynamicShapesWithFirstReplicaNotMaximumShape(self, distribution):
def dataset_fn(_):
dataset1 = get_dataset_from_tensor_slices([[1., 2.], [1., 2.]])
dataset2 = get_dataset_from_tensor_slices([[1., 2., 3.],
[1., 2., 3.]])
dataset = dataset1.concatenate(dataset2)
dataset = dataset.batch(2, drop_remainder=True)
return dataset
input_iterator = iter(
distribution.distribute_datasets_from_function(dataset_fn))
@def_function.function
def run(inputs):
def computation(x):
return math_ops.reduce_mean(x)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([1.5, 2.], run(next(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testMapFnWithDynamicInputs(self, distribution):
def dataset_fn(_):
data = array_ops.zeros((20, 300, 32), dtype=dtypes.int32)
dataset = get_dataset_from_tensor_slices(data)
dataset = dataset.batch(16)
return dataset
input_iterator = iter(
distribution.distribute_datasets_from_function(dataset_fn))
def embedding_lookup(inputs):
embedding_weights = array_ops.zeros((1, 128))
flat_inputs = array_ops.reshape(inputs, [-1])
embeddings = array_ops.gather(embedding_weights, flat_inputs)
embeddings = array_ops.reshape(embeddings, inputs.shape.as_list() + [128])
return embeddings
@def_function.function
def step_fn(example):
return map_fn.map_fn(
embedding_lookup, example, fn_output_signature=dtypes.float32)
# This assumes that there are exactly 2 replicas
outputs = distribution.experimental_local_results(
distribution.run(step_fn, args=(next(input_iterator),)))
self.assertAllEqual((16, 300, 32, 128), outputs[0].shape)
self.assertAllEqual((4, 300, 32, 128), outputs[1].shape)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeEvenlyDivisibleDrop(self, distribution):
# If the batch size is evenly divisible by the number of workers and we set
# drop_remainder=True on the dataset, then DistributedIterator will use a
# different (and more efficient) code path which avoids some control flow
# ops.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
2, drop_remainder=True)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5., 6.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeNotDivisibleDrop(self, distribution):
# If each batch is not evenly divisible by the number of workers,
# the remainder will be dropped.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
1, drop_remainder=True)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetDistributeEvenlyDivisibleNoDrop(self, distribution):
# Setting drop_remainder=False on the dataset causes DistributedIterator
# to use get_next_as_optional(), even if the batched dataset is evenly
# divisible by the number of workers.
dataset = get_dataset_from_tensor_slices([5., 6.]).batch(
2, drop_remainder=False)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
data = next(input_iterator)
expected_result = [5., 6.]
final_result = []
actual_result = distribution.experimental_local_results(data)
for val in actual_result:
final_result.extend(val)
self.assertAllEqual(expected_result, final_result)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetPartialBatchWithMixedOutputs(self, distribution):
# Dynamic output size with a mix of static and dynamic outputs
dataset = get_dataset_from_tensor_slices([5.]).batch(2)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
# Fixed size output with a dynamic sized output.
return array_ops.zeros([3]), math_ops.square(x)
return distribution.run(
computation, args=(next(iterator),))
results = run(input_iterator)
# First result is fixed for all replicas.
for replica_id in range(distribution.num_replicas_in_sync):
self.assertAllEqual([0., 0., 0.],
distribution.experimental_local_results(
results[0])[replica_id])
# Only first replica has distributed dataset computation.
self.assertAllEqual([25.],
distribution.experimental_local_results(results[1])[0])
# Other replicas have no distributed dataset computation.
for replica_id in range(1, distribution.num_replicas_in_sync):
self.assertAllEqual([],
distribution.experimental_local_results(
results[1])[replica_id])
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testIterationInsideFunction(self, distribution):
def step_fn(data):
return math_ops.square(data)
@def_function.function
def train(dataset):
results = []
iterator = iter(dataset)
# we iterate through the loop 2 times since we have 4 elements and a
# global batch of 2.
for _ in range(2):
elem = next(iterator)
output = distribution.experimental_local_results(
distribution.run(step_fn, args=(elem,)))
results.append(output)
return results
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
results = train(dist_dataset)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testIterationOutsideFunction(self, distribution):
def train_step(data):
return math_ops.square(data)
@def_function.function
def f_train_step(input_data):
return distribution.experimental_local_results(
distribution.run(train_step, args=(input_data,)))
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
results = []
# we iterate through the loop 2 times since we have 4 elements and a
# global batch of 2.
for _ in range(2):
output = f_train_step(next(iterator))
results.append(output)
self.assert_equal_flattened([[25., 36.], [49., 64.]], results)
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testMultiDeviceDataCapturedFunction(self, distribution):
inputs = constant_op.constant([2., 3.])
dataset = lambda _: dataset_ops.Dataset.from_tensor_slices(inputs).repeat(5)
input_iterator = iter(
distribution.distribute_datasets_from_function(dataset))
with distribution.scope():
var = variables.Variable(1.0)
@def_function.function
def train_step(input_iterator):
def func(inputs):
return math_ops.square(inputs) + var
per_replica_outputs = distribution.run(
func, (next(input_iterator),))
mean = distribution.reduce(
reduce_util.ReduceOp.MEAN, per_replica_outputs, axis=None)
for _ in dataset_ops.Dataset.range(1):
per_replica_outputs = distribution.run(
func, (next(input_iterator),))
mean = distribution.reduce(
reduce_util.ReduceOp.MEAN, per_replica_outputs, axis=None)
return mean
with distribution.scope():
if distribution.num_replicas_in_sync == 1:
self.assertAlmostEqual(10.0, self.evaluate(train_step(input_iterator)))
else:
self.assertAlmostEqual(7.5, self.evaluate(train_step(input_iterator)))
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def testDatasetOutOfRange(self, distribution):
with distribution.scope():
a = variables.Variable(
0.0, aggregation=variables.VariableAggregation.SUM)
def train_step(val):
a.assign_add(math_ops.reduce_sum(val))
@def_function.function
def f_train_step(iterator):
distribution.run(train_step, args=(next(iterator),))
return a
dataset = get_dataset_from_tensor_slices([5., 6., 7., 8.]).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
with self.assertRaises(errors.OutOfRangeError):
for _ in range(100):
f_train_step(iterator)
self.assertAlmostEqual(26.0, a.numpy())
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.multidevice_strategies,
mode=["eager"]))
def testComputeLossWithDynamicShapes(self, distribution):
dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
@def_function.function
def run(iterator):
def computation(x):
return losses.compute_weighted_loss(x, weights=array_ops.ones_like(x))
inputs = next(iterator)
outputs = distribution.experimental_local_results(
distribution.run(computation, args=(inputs,)))
return outputs
# This assumes that there are exactly 2 replicas
self.assertAllEqual([5.5, 7.], run(input_iterator))
if __name__ == "__main__":
test_util.main()
|
willingc/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/web/xmlrpc.py
|
18
|
# -*- test-case-name: twisted.web.test.test_xmlrpc -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A generic resource for publishing objects via XML-RPC.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import sys, xmlrpclib, urlparse
# Sibling Imports
from twisted.web import resource, server, http
from twisted.internet import defer, protocol, reactor
from twisted.python import log, reflect, failure
# These are deprecated, use the class level definitions
NOT_FOUND = 8001
FAILURE = 8002
# Useful so people don't need to import xmlrpclib directly
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime
# On Python 2.4 and earlier, DateTime.decode returns unicode.
if sys.version_info[:2] < (2, 5):
_decode = DateTime.decode
DateTime.decode = lambda self, value: _decode(self, value.encode('ascii'))
def withRequest(f):
"""
Decorator to cause the request to be passed as the first argument
to the method.
If an I{xmlrpc_} method is wrapped with C{withRequest}, the
request object is passed as the first argument to that method.
For example::
@withRequest
def xmlrpc_echo(self, request, s):
return s
@since: 10.2
"""
f.withRequest = True
return f
class NoSuchFunction(Fault):
"""
There is no function by the given name.
"""
class Handler:
"""
Handle an XML-RPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
For example, let's say we want to authenticate against twisted.cred,
run a LDAP query and then pass its result to a database query, all
as a result of a single XML-RPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the XML-RPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
self.result.errback(
NotImplementedError("Implement run() in subclasses"))
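# Illustrative sketch (not part of Twisted itself): a concrete Handler would
# start its asynchronous work in run() and deliver the outcome through
# self.result, for example:
#
#     class DeferredLookupHandler(Handler):
#         def run(self, name):
#             d = some_async_lookup(name)  # hypothetical Deferred-returning call
#             d.chainDeferred(self.result)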
class XMLRPC(resource.Resource):
"""
A resource that implements XML-RPC.
You probably want to connect this to '/RPC2'.
Methods published can return XML-RPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'xmlrpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
@ivar allowNone: Permit XML translating of Python constant None.
@type allowNone: C{bool}
@ivar useDateTime: Present datetime values as datetime.datetime objects?
Requires Python >= 2.5.
@type useDateTime: C{bool}
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
allowedMethods = ('POST',)
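# Minimal usage sketch (illustrative, not part of this module): a subclass only
# needs to define xmlrpc_-prefixed methods, which may return plain values,
# Faults, Deferreds, or Handler instances, e.g.:
#
#     class Echoer(XMLRPC):
#         def xmlrpc_echo(self, s):
#             return s
#
# and is typically published at '/RPC2' through twisted.web.server.Site.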
def __init__(self, allowNone=False, useDateTime=False):
resource.Resource.__init__(self)
self.subHandlers = {}
self.allowNone = allowNone
self.useDateTime = useDateTime
def __setattr__(self, name, value):
if name == "useDateTime" and value and sys.version_info[:2] < (2, 5):
raise RuntimeError("useDateTime requires Python 2.5 or later.")
self.__dict__[name] = value
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return self.subHandlers.keys()
def render_POST(self, request):
request.content.seek(0, 0)
request.setHeader("content-type", "text/xml")
try:
if self.useDateTime:
args, functionPath = xmlrpclib.loads(request.content.read(),
use_datetime=True)
else:
# Maintain backwards compatibility with Python < 2.5
args, functionPath = xmlrpclib.loads(request.content.read())
except Exception, e:
f = Fault(self.FAILURE, "Can't deserialize input: %s" % (e,))
self._cbRender(f, request)
else:
try:
function = self._getFunction(functionPath)
except Fault, f:
self._cbRender(f, request)
else:
# Use this list to track whether the response has failed or not.
# This will be used later on to decide if the result of the
# Deferred should be written out and Request.finish called.
responseFailed = []
request.notifyFinish().addErrback(responseFailed.append)
if getattr(function, 'withRequest', False):
d = defer.maybeDeferred(function, request, *args)
else:
d = defer.maybeDeferred(function, *args)
d.addErrback(self._ebRender)
d.addCallback(self._cbRender, request, responseFailed)
return server.NOT_DONE_YET
def _cbRender(self, result, request, responseFailed=None):
if responseFailed:
return
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
try:
content = xmlrpclib.dumps(
result, methodresponse=True,
allow_none=self.allowNone)
except Exception, e:
f = Fault(self.FAILURE, "Can't serialize output: %s" % (e,))
content = xmlrpclib.dumps(f, methodresponse=True,
allow_none=self.allowNone)
request.setHeader("content-length", str(len(content)))
request.write(content)
except:
log.err()
request.finish()
def _ebRender(self, failure):
if isinstance(failure.value, Fault):
return failure.value
log.err(failure)
return Fault(self.FAILURE, "error")
def _getFunction(self, functionPath):
"""
Given a string, return a function, or raise NoSuchFunction.
This returned function will be called, and should return the result
of the call, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The default
policy is that given functionPath 'foo', return the method at
self.xmlrpc_foo, i.e. getattr(self, "xmlrpc_" + functionPath).
If functionPath contains self.separator, the sub-handler for
the initial prefix is used to search for the remaining path.
"""
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise NoSuchFunction(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return handler._getFunction(functionPath)
f = getattr(self, "xmlrpc_%s" % functionPath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND,
"function %s not found" % functionPath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND,
"function %s not callable" % functionPath)
else:
return f
def _listFunctions(self):
"""
Return a list of the names of all xmlrpc methods.
"""
return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_')
class XMLRPCIntrospection(XMLRPC):
"""
Implement the XML-RPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be XML-RPC types, not Python types.
"""
def __init__(self, parent):
"""
Implement Introspection support for an XMLRPC server.
@param parent: the XMLRPC server to add Introspection support to.
@type parent: L{XMLRPC}
"""
XMLRPC.__init__(self)
self._xmlrpc_parent = parent
def xmlrpc_listMethods(self):
"""
Return a list of the method names implemented by this server.
"""
functions = []
todo = [(self._xmlrpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([prefix + name for name in obj._listFunctions()])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
xmlrpc_listMethods.signature = [['array']]
def xmlrpc_methodHelp(self, method):
"""
Return a documentation string describing the use of the given method.
"""
method = self._xmlrpc_parent._getFunction(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
xmlrpc_methodHelp.signature = [['string', 'string']]
def xmlrpc_methodSignature(self, method):
"""
Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._xmlrpc_parent._getFunction(method)
return getattr(method, 'signature', None) or ''
xmlrpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(xmlrpc):
"""
Add Introspection support to an XMLRPC server.
@param xmlrpc: the XMLRPC server to add Introspection support to.
@type xmlrpc: L{XMLRPC}
"""
xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc))
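# Usage note (illustrative, assuming an existing XMLRPC resource named `root`):
# addIntrospection(root) registers the introspection resource under the
# 'system' prefix, so clients can call system.listMethods, system.methodHelp
# and system.methodSignature.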
class QueryProtocol(http.HTTPClient):
def connectionMade(self):
self._response = None
self.sendCommand('POST', self.factory.path)
self.sendHeader('User-Agent', 'Twisted/XMLRPClib')
self.sendHeader('Host', self.factory.host)
self.sendHeader('Content-type', 'text/xml')
self.sendHeader('Content-length', str(len(self.factory.payload)))
if self.factory.user:
auth = '%s:%s' % (self.factory.user, self.factory.password)
auth = auth.encode('base64').strip()
self.sendHeader('Authorization', 'Basic %s' % (auth,))
self.endHeaders()
self.transport.write(self.factory.payload)
def handleStatus(self, version, status, message):
if status != '200':
self.factory.badStatus(status, message)
def handleResponse(self, contents):
"""
Handle the XML-RPC response received from the server.
Specifically, disconnect from the server and store the XML-RPC
response so that it can be properly handled when the disconnect is
finished.
"""
self.transport.loseConnection()
self._response = contents
def connectionLost(self, reason):
"""
The connection to the server has been lost.
If we have a full response from the server, then parse it and fire a
Deferred with the return value or C{Fault} that the server gave us.
"""
http.HTTPClient.connectionLost(self, reason)
if self._response is not None:
response, self._response = self._response, None
self.factory.parseResponse(response)
payloadTemplate = """<?xml version="1.0"?>
<methodCall>
<methodName>%s</methodName>
%s
</methodCall>
"""
class _QueryFactory(protocol.ClientFactory):
"""
XML-RPC Client Factory
@ivar path: The path portion of the URL to which to post method calls.
@type path: C{str}
@ivar host: The value to use for the Host HTTP header.
@type host: C{str}
@ivar user: The username with which to authenticate with the server
when making calls.
@type user: C{str} or C{NoneType}
@ivar password: The password with which to authenticate with the server
when making calls.
@type password: C{str} or C{NoneType}
@ivar useDateTime: Accept datetime values as datetime.datetime objects.
It's also passed to the underlying xmlrpclib implementation. Defaults to
False. Requires Python >= 2.5.
@type useDateTime: C{bool}
"""
deferred = None
protocol = QueryProtocol
def __init__(self, path, host, method, user=None, password=None,
allowNone=False, args=(), canceller=None, useDateTime=False):
"""
@param method: The name of the method to call.
@type method: C{str}
@param allowNone: allow the use of None values in parameters. It's
passed to the underlying xmlrpclib implementation. Defaults to False.
@type allowNone: C{bool} or C{NoneType}
@param args: the arguments to pass to the method.
@type args: C{tuple}
@param canceller: A 1-argument callable passed to the deferred as the
canceller callback.
@type canceller: callable or C{NoneType}
"""
self.path, self.host = path, host
self.user, self.password = user, password
self.payload = payloadTemplate % (method,
xmlrpclib.dumps(args, allow_none=allowNone))
self.deferred = defer.Deferred(canceller)
self.useDateTime = useDateTime
def parseResponse(self, contents):
if not self.deferred:
return
try:
if self.useDateTime:
response = xmlrpclib.loads(contents,
use_datetime=True)[0][0]
else:
# Maintain backwards compatibility with Python < 2.5
response = xmlrpclib.loads(contents)[0][0]
except:
deferred, self.deferred = self.deferred, None
deferred.errback(failure.Failure())
else:
deferred, self.deferred = self.deferred, None
deferred.callback(response)
def clientConnectionLost(self, _, reason):
if self.deferred is not None:
deferred, self.deferred = self.deferred, None
deferred.errback(reason)
clientConnectionFailed = clientConnectionLost
def badStatus(self, status, message):
deferred, self.deferred = self.deferred, None
deferred.errback(ValueError(status, message))
class Proxy:
"""
A Proxy for making remote XML-RPC calls.
Pass the URL of the remote XML-RPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
@ivar user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type user: C{str} or C{NoneType}
@ivar password: The password with which to authenticate with the server
when making calls. If specified, overrides any password information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type password: C{str} or C{NoneType}
@ivar allowNone: allow the use of None values in parameters. It's
passed to the underlying xmlrpclib implementation. Defaults to False.
@type allowNone: C{bool} or C{NoneType}
@ivar useDateTime: Accept datetime values as datetime.datetime objects.
It's also passed to the underlying xmlrpclib implementation. Defaults to
False. Requires Python >= 2.5.
@type useDateTime: C{bool}
@ivar connectTimeout: Number of seconds to wait before assuming the
connection has failed.
@type connectTimeout: C{float}
@ivar _reactor: the reactor used to create connections.
@type _reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
@ivar queryFactory: object returning a factory for XML-RPC protocol. Mainly
useful for tests.
"""
queryFactory = _QueryFactory
def __init__(self, url, user=None, password=None, allowNone=False,
useDateTime=False, connectTimeout=30.0, reactor=reactor):
"""
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type url: C{str}
"""
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
netlocParts = netloc.split('@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
except:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
except:
self.port = None
self.path = path
if self.path in ['', None]:
self.path = '/'
self.secure = (scheme == 'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
self.allowNone = allowNone
self.useDateTime = useDateTime
self.connectTimeout = connectTimeout
self._reactor = reactor
def __setattr__(self, name, value):
if name == "useDateTime" and value and sys.version_info[:2] < (2, 5):
raise RuntimeError("useDateTime requires Python 2.5 or later.")
self.__dict__[name] = value
def callRemote(self, method, *args):
"""
Call remote XML-RPC C{method} with given arguments.
@return: a L{defer.Deferred} that will fire with the method response,
or a failure if the method failed. Generally, the failure type will
be L{Fault}, but you can also have an C{IndexError} on some buggy
servers giving empty responses.
If the deferred is cancelled before the request completes, the
connection is closed and the deferred will fire with a
L{defer.CancelledError}.
"""
def cancel(d):
factory.deferred = None
connector.disconnect()
factory = self.queryFactory(
self.path, self.host, method, self.user,
self.password, self.allowNone, args, cancel, self.useDateTime)
if self.secure:
from twisted.internet import ssl
connector = self._reactor.connectSSL(
self.host, self.port or 443,
factory, ssl.ClientContextFactory(),
timeout=self.connectTimeout)
else:
connector = self._reactor.connectTCP(
self.host, self.port or 80, factory,
timeout=self.connectTimeout)
return factory.deferred
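# Illustrative client sketch (not part of this module; the URL, method name and
# callbacks are placeholders):
#
#     proxy = Proxy('http://example.invalid:7080/RPC2')
#     d = proxy.callRemote('echo', 'hello')
#     d.addCallbacks(printValue, printError)  # hypothetical callbacks
#     reactor.run()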
__all__ = [
"XMLRPC", "Handler", "NoSuchFunction", "Proxy",
"Fault", "Binary", "Boolean", "DateTime"]
|
ongair/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_ib/protocolentities/test_dirty_ib.py
|
70
|
from yowsup.layers.protocol_ib.protocolentities.test_ib import IbProtocolEntityTest
from yowsup.layers.protocol_ib.protocolentities.dirty_ib import DirtyIbProtocolEntity
from yowsup.structs import ProtocolTreeNode
class DirtyIbProtocolEntityTest(IbProtocolEntityTest):
def setUp(self):
super(DirtyIbProtocolEntityTest, self).setUp()
self.ProtocolEntity = DirtyIbProtocolEntity
dirtyNode = ProtocolTreeNode("dirty")
dirtyNode["timestamp"] = "123456"
dirtyNode["type"] = "groups"
self.node.addChild(dirtyNode)
|
trondhindenes/ansible
|
refs/heads/devel
|
test/units/modules/cloud/openstack/test_os_server.py
|
60
|
import collections
import inspect
import mock
import pytest
import yaml
from ansible.module_utils.six import string_types
from ansible.modules.cloud.openstack import os_server
class AnsibleFail(Exception):
pass
class AnsibleExit(Exception):
pass
def params_from_doc(func):
'''This function extracts the docstring from the specified function,
parses it as a YAML document, and returns parameters for the os_server
module.'''
doc = inspect.getdoc(func)
cfg = yaml.load(doc)
for task in cfg:
for module, params in task.items():
for k, v in params.items():
if k in ['nics'] and isinstance(v, string_types):
params[k] = [v]
task[module] = collections.defaultdict(str,
params)
return cfg[0]['os_server']
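# Illustrative note (not part of the original tests): a test method whose
# docstring is
#
#     - os_server:
#         nics: net-id=1234
#
# yields params_from_doc(...) == defaultdict(str, {'nics': ['net-id=1234']}),
# so any option the docstring omits defaults to the empty string.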
class FakeCloud(object):
ports = [
{'name': 'port1', 'id': '1234'},
{'name': 'port2', 'id': '4321'},
]
networks = [
{'name': 'network1', 'id': '5678'},
{'name': 'network2', 'id': '8765'},
]
images = [
{'name': 'cirros', 'id': '1'},
{'name': 'fedora', 'id': '2'},
]
flavors = [
{'name': 'm1.small', 'id': '1', 'flavor_ram': 1024},
{'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512},
]
def _find(self, source, name):
for item in source:
if item['name'] == name or item['id'] == name:
return item
def get_image_id(self, name, exclude=None):
image = self._find(self.images, name)
if image:
return image['id']
def get_flavor(self, name):
return self._find(self.flavors, name)
def get_flavor_by_ram(self, ram, include=None):
for flavor in self.flavors:
if flavor['flavor_ram'] >= ram and (include is None or include in
flavor['name']):
return flavor
def get_port(self, name):
return self._find(self.ports, name)
def get_network(self, name):
return self._find(self.networks, name)
def get_openstack_vars(self, server):
return server
create_server = mock.MagicMock()
class TestNetworkArgs(object):
'''This class exercises the _network_args function of the
os_server module. For each test, we parse the YAML document
contained in the docstring to retrieve the module parameters for the
test.'''
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
def test_nics_string_net_id(self):
'''
- os_server:
nics: net-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_string_net_id_list(self):
'''
- os_server:
nics: net-id=1234,net-id=4321
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['net-id'] == '4321')
def test_nics_string_port_id(self):
'''
- os_server:
nics: port-id=1234
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_string_net_name(self):
'''
- os_server:
nics: net-name=network1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '5678')
def test_nics_string_port_name(self):
'''
- os_server:
nics: port-name=port1
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['port-id'] == '1234')
def test_nics_structured_net_id(self):
'''
- os_server:
nics:
- net-id: '1234'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
def test_nics_structured_mixed(self):
'''
- os_server:
nics:
- net-id: '1234'
- port-name: port1
- 'net-name=network1,port-id=4321'
'''
args = os_server._network_args(self.module, self.cloud)
assert(args[0]['net-id'] == '1234')
assert(args[1]['port-id'] == '1234')
assert(args[2]['net-id'] == '5678')
assert(args[3]['port-id'] == '4321')
class TestCreateServer(object):
def setup_method(self, method):
self.cloud = FakeCloud()
self.module = mock.MagicMock()
self.module.params = params_from_doc(method)
self.module.fail_json.side_effect = AnsibleFail()
self.module.exit_json.side_effect = AnsibleExit()
self.meta = mock.MagicMock()
        self.meta.get_hostvars_from_server.return_value = {
'id': '1234'
}
os_server.meta = self.meta
def test_create_server(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: network1
meta:
- key: value
'''
with pytest.raises(AnsibleExit):
os_server._create_server(self.module, self.cloud)
assert(self.cloud.create_server.call_count == 1)
assert(self.cloud.create_server.call_args[1]['image'] == self.cloud.get_image_id('cirros'))
assert(self.cloud.create_server.call_args[1]['flavor'] == self.cloud.get_flavor('m1.tiny')['id'])
assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id'] == self.cloud.get_network('network1')['id'])
def test_create_server_bad_flavor(self):
'''
- os_server:
image: cirros
flavor: missing_flavor
nics:
- net-name: network1
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_flavor' in
self.module.fail_json.call_args[1]['msg'])
def test_create_server_bad_nic(self):
'''
- os_server:
image: cirros
flavor: m1.tiny
nics:
- net-name: missing_network
'''
with pytest.raises(AnsibleFail):
os_server._create_server(self.module, self.cloud)
assert('missing_network' in
self.module.fail_json.call_args[1]['msg'])
|
zchking/odoo
|
refs/heads/8.0
|
addons/crm_profiling/wizard/__init__.py
|
438
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import open_questionnaire
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
leifos/simiir
|
refs/heads/master
|
simiir/query_generators/tri_term_generator_reversed.py
|
1
|
from simiir.query_generators.tri_term_generator import TriTermQueryGenerator
class TriTermQueryGeneratorReversed(TriTermQueryGenerator):
"""
    Implements Strategy 3 from Heikki's 2009 paper, generating three-term queries.
    The first two terms are drawn from the topic, with the third and final term selected from the description - in some ranked order.
    Reverses the order of the generated queries.
"""
def __init__(self, stopword_file, background_file=[]):
super(TriTermQueryGeneratorReversed, self).__init__(stopword_file, background_file=background_file)
def generate_query_list(self, search_context):
"""
Takes the query list from the underlying query generator (tri-term), and reverses it.
"""
topic = search_context.topic
queries = super(TriTermQueryGeneratorReversed, self).generate_query_list(search_context)
queries.reverse()
return queries
|
Lipen/LipenDev
|
refs/heads/master
|
Azeroth/Northrend/DOB3/getter.py
|
1
|
# path = 'seqs/seqERV3-1_macaque_chr2.fasta'
# pos_ltr5 = (96109, 95584)
# pos_ltr3 = (88903, 88379)
# path = 'seqs/seqERV3-1_pan.fasta'
# pos_ltr5 = (579, 1190)
# pos_ltr3 = (9604, 10206)
# path = 'seqs/seqERVIPF-10H.fasta'
# pos_ltr5 = (5377, 4938)
# pos_ltr3 = (1064, 619)
# path = 'seqs/seqERV3-1_macaque_chr3.fasta'
# pos_ltr5 = (2194, 2803)
# pos_ltr3 = (11225, 11824)
# path = 'seqs/seqERV3-1_hg38.fasta'
# pos_ltr5 = (7392, 7983)
# pos_ltr3 = (16412, 17004)
path = 'seqs/seqERVFRD-1_hg38.fasta'
pos_ltr5 = (12087, 11598)
pos_ltr3 = (3319, 2831)
with open(path) as f:
data = ''.join(line.strip() for line in f.readlines()[1:])
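# The coordinate pairs above are 1-based and inclusive; when start > end the
# element presumably lies on the reverse strand, so the slice below is taken
# the other way round and the extracted sequence is reversed.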
i, j = pos_ltr5
if i < j:
ltr5 = data[i - 1:j]
else:
ltr5 = data[j - 1:i][::-1]
i, j = pos_ltr3
if i < j:
ltr3 = data[i - 1:j]
else:
ltr3 = data[j - 1:i][::-1]
print('>5\'LTR\n{}\n>3\'LTR\n{}'.format(ltr5, ltr3))
|
eoyilmaz/anima
|
refs/heads/master
|
anima/exc.py
|
1
|
# -*- coding: utf-8 -*-
"""This module contains exceptions
"""
class PublishError(RuntimeError):
"""Raised when the published version is not matching the quality
"""
pass
|
yanheven/cinder
|
refs/heads/master
|
cinder/volume/targets/scst.py
|
6
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE
from cinder.volume.targets import iscsi
from cinder.volume import utils as vutils
LOG = logging.getLogger(__name__)
class SCSTAdm(iscsi.ISCSITarget):
def __init__(self, *args, **kwargs):
super(SCSTAdm, self).__init__(*args, **kwargs)
self.volumes_dir = self.configuration.safe_get('volumes_dir')
self.iscsi_target_prefix = self.configuration.safe_get(
'iscsi_target_prefix')
self.target_name = self.configuration.safe_get('scst_target_iqn_name')
self.target_driver = self.configuration.safe_get('scst_target_driver')
self.chap_username = self.configuration.safe_get('chap_username')
self.chap_password = self.configuration.safe_get('chap_password')
self.initiator_iqn = None
self.remove_initiator_iqn = None
def scst_execute(self, *args):
return utils.execute('scstadmin', *args, run_as_root=True)
def validate_connector(self, connector):
# iSCSI drivers require the initiator information
if 'initiator' not in connector:
err_msg = _('The volume driver requires the iSCSI initiator '
'name in the connector.')
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
self.initiator_iqn = connector['initiator']
def terminate_connection(self, volume, connector, **kwargs):
self.remove_initiator_iqn = connector['initiator']
def _get_target(self, iqn):
(out, _err) = self.scst_execute('-list_target')
if iqn in out:
return self._target_attribute(iqn)
return None
def _target_attribute(self, iqn):
(out, _err) = self.scst_execute('-list_tgt_attr', iqn,
'-driver', self.target_driver)
lines = out.split('\n')
for line in lines:
if "rel_tgt_id" in line:
parsed = line.split()
return parsed[1]
def _get_group(self):
scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
(out, _err) = self.scst_execute('-list_group')
if scst_group in out:
return out
return None
def _get_luns_info(self):
scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
(out, _err) = self.scst_execute('-list_group', scst_group,
'-driver', self.target_driver,
'-target', self.target_name)
first = "Assigned LUNs:"
last = "Assigned Initiators:"
start = out.index(first) + len(first)
end = out.index(last, start)
out = out[start:end]
luns = []
for line in out.strip().split("\n")[2:]:
luns.append(int(line.strip().split(" ")[0]))
luns = sorted(set(luns))
return luns
def _get_target_and_lun(self, context, volume):
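        # Note on the logic below: the SCST target index is always 0; the LUN
        # starts at 1 when no group or LUNs exist yet, otherwise the first gap
        # in the sorted LUN list is used (or the next LUN after the last one).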
iscsi_target = 0
if not self.target_name or not self._get_group():
lun = 1
return iscsi_target, lun
luns = self._get_luns_info()
if (not luns) or (luns[0] != 1):
lun = 1
return iscsi_target, lun
else:
for lun in luns:
if (luns[-1] == lun) or (luns[lun - 1] + 1 != luns[lun]):
return iscsi_target, (lun + 1)
def create_iscsi_target(self, name, vol_id, tid, lun, path,
chap_auth=None):
scst_group = "%s%s" % (self.initiator_iqn, self.target_name)
vol_name = path.split("/")[3]
try:
(out, _err) = self.scst_execute('-noprompt',
'-set_drv_attr',
self.target_driver,
'-attributes',
'enabled=1')
LOG.debug('StdOut from set driver attribute: %s', out)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to set attribute for enable target driver "
"%s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to enable SCST Target driver.")
if self._get_target(name) is None:
try:
(out, _err) = self.scst_execute('-add_target', name,
'-driver', self.target_driver)
LOG.debug("StdOut from scstadmin create target: %s", out)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to create iscsi target for volume "
"id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e})
raise exception.ISCSITargetCreateFailed(volume_id=vol_name)
try:
(out, _err) = self.scst_execute('-enable_target', name,
'-driver', self.target_driver)
LOG.debug("StdOut from scstadmin enable target: %s", out)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to set 'enable' attribute for "
"SCST target %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
                    error_message="Failed to enable SCST Target.")
if chap_auth and self.target_name:
try:
chap_string = self._iscsi_authentication('IncomingUser=',
*chap_auth)
(out, _err) = self.scst_execute('-noprompt',
'-set_tgt_attr', name,
'-driver',
self.target_driver,
'-attributes',
chap_string)
LOG.debug("StdOut from scstadmin set target attribute:"
" %s.", out)
except putils.ProcessExecutionError:
msg = _("Failed to set attribute 'Incoming user' for "
"SCST target.")
LOG.exception(msg)
raise exception.ISCSITargetHelperCommandFailed(
                    error_message=msg)
if self.target_name:
if self._get_group() is None:
try:
(out, _err) = self.scst_execute('-add_group', scst_group,
'-driver',
self.target_driver,
'-target', name)
LOG.debug("StdOut from scstadmin create group: %s", out)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to create group to SCST target "
"%s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to create group to SCST target.")
try:
(out, _err) = self.scst_execute('-add_init',
self.initiator_iqn,
'-driver', self.target_driver,
'-target', name,
'-group', scst_group)
LOG.debug("StdOut from scstadmin add initiator: %s", out)
except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to add initiator to group "
                              "for SCST target %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to add Initiator to group for "
"SCST target.")
tid = self._get_target(name)
if self.target_name is None:
disk_id = "disk%s" % tid
else:
disk_id = "%s%s" % (lun, vol_id.split('-')[-1])
try:
self.scst_execute('-open_dev', disk_id,
'-handler', 'vdisk_fileio',
'-attributes', 'filename=%s' % path)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to add device to handler %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to add device to SCST handler.")
try:
if self.target_name:
self.scst_execute('-add_lun', lun,
'-driver', self.target_driver,
'-target', name,
'-device', disk_id,
'-group', scst_group)
else:
self.scst_execute('-add_lun', lun,
'-driver', self.target_driver,
'-target', name,
'-device', disk_id)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to add lun to SCST target "
"id:%(vol_id)s: %(e)s"), {'vol_id': name, 'e': e})
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to add LUN to SCST Target for "
"volume " + vol_name)
# SCST uses /etc/scst.conf as the default configuration when it
# starts
try:
self.scst_execute('-write_config', '/etc/scst.conf')
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to write in /etc/scst.conf."))
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to write in /etc/scst.conf.")
return tid
def _iscsi_location(self, ip, target, iqn, lun=None):
return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port,
target, iqn, lun)
def _get_iscsi_target(self, context, vol_id):
# FIXME(jdg): Need to implement abc method
pass
def _get_target_chap_auth(self, context, iscsi_name):
# FIXME(jdg): Need to implement abc method
if self._get_target(iscsi_name) is None:
return None
(out, _err) = self.scst_execute('-list_tgt_attr', iscsi_name,
'-driver', self.target_driver)
first = "KEY"
last = "Dynamic attributes"
start = out.index(first) + len(first)
end = out.index(last, start)
out = out[start:end]
out = out.split("\n")[2]
if "IncomingUser" in out:
out = out.split(" ")
out = filter(lambda a: a != "", out)
return (out[1], out[2])
else:
return None
def ensure_export(self, context, volume, volume_path):
iscsi_target, lun = self._get_target_and_lun(context, volume)
if self.target_name is None:
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
volume['name'])
else:
iscsi_name = self.target_name
if self.chap_username and self.chap_password:
chap_auth = (self.chap_username, self.chap_password)
else:
chap_auth = self._get_target_chap_auth(context, iscsi_name)
self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target,
lun, volume_path, chap_auth)
def create_export(self, context, volume, volume_path):
"""Creates an export for a logical volume."""
iscsi_target, lun = self._get_target_and_lun(context, volume)
if self.target_name is None:
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
volume['name'])
else:
iscsi_name = self.target_name
if self.chap_username and self.chap_password:
chap_auth = (self.chap_username, self.chap_password)
else:
chap_auth = self._get_target_chap_auth(context, iscsi_name)
if not chap_auth:
chap_auth = (vutils.generate_username(),
vutils.generate_password())
tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target,
lun, volume_path, chap_auth)
data = {}
data['location'] = self._iscsi_location(
self.configuration.iscsi_ip_address, tid, iscsi_name, lun)
LOG.debug('Set provider_location to: %s', data['location'])
data['auth'] = self._iscsi_authentication(
'CHAP', *chap_auth)
return data
def remove_export(self, context, volume):
try:
location = volume['provider_location'].split(' ')
iqn = location[1]
iscsi_target = self._get_target(iqn)
self.show_target(iscsi_target, iqn)
except Exception:
            LOG.error(_LE("Skipping remove_export. No iscsi_target is "
"presently exported for volume: %s"), volume['id'])
return
vol = self.db.volume_get(context, volume['id'])
lun = "".join(vol['provider_location'].split(" ")[-1:])
self.remove_iscsi_target(iscsi_target, lun,
volume['id'], volume['name'])
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
disk_id = "%s%s" % (lun, vol_id.split('-')[-1])
vol_uuid_file = vol_name
if self.target_name is None:
iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file)
else:
iqn = self.target_name
if self.target_name is None:
try:
self.scst_execute('-noprompt',
'-rem_target', iqn,
'-driver', 'iscsi')
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to remove iscsi target for volume "
"id:%(vol_id)s: %(e)s"), {'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
self.scst_execute('-noprompt',
'-close_dev', "disk%s" % tid,
'-handler', 'vdisk_fileio')
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to close disk device %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to close disk device for "
"SCST handler.")
if self._get_target(iqn):
try:
self.scst_execute('-noprompt',
'-rem_target', iqn,
'-driver', self.target_driver)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to remove iscsi target for "
"volume id:%(vol_id)s: %(e)s"),
{'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
else:
if not int(lun) in self._get_luns_info():
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
scst_group = "%s%s" % (self.remove_initiator_iqn,
self.target_name)
self.scst_execute('-noprompt', '-rem_lun', lun,
'-driver', self.target_driver,
'-target', iqn, '-group',
scst_group)
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to remove LUN %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to remove LUN for SCST Target.")
try:
self.scst_execute('-noprompt',
'-close_dev', disk_id,
'-handler', 'vdisk_fileio')
except putils.ProcessExecutionError as e:
LOG.error(_LE("Failed to close disk device %s"), e)
raise exception.ISCSITargetHelperCommandFailed(
error_message="Failed to close disk device for "
"SCST handler.")
self.scst_execute('-write_config', '/etc/scst.conf')
def show_target(self, tid, iqn):
if iqn is None:
raise exception.InvalidParameterValue(
err=_('valid iqn needed for show_target'))
tid = self._get_target(iqn)
if tid is None:
raise exception.ISCSITargetHelperCommandFailed(
error_message="Target not found")
def initialize_connection(self, volume, connector):
iscsi_properties = self._get_iscsi_properties(volume)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
|
doganov/edx-platform
|
refs/heads/master
|
cms/djangoapps/course_creators/tests/__init__.py
|
12133432
| |
britcey/ansible
|
refs/heads/devel
|
test/runner/lib/delegation.py
|
24
|
"""Delegate test execution to another environment."""
from __future__ import absolute_import, print_function
import os
import re
import sys
import tempfile
import lib.pytar
import lib.thread
from lib.executor import (
SUPPORTED_PYTHON_VERSIONS,
IntegrationConfig,
ShellConfig,
SanityConfig,
UnitsConfig,
create_shell_command,
)
from lib.test import (
TestConfig,
)
from lib.core_ci import (
AnsibleCoreCI,
)
from lib.manage_ci import (
ManagePosixCI,
)
from lib.util import (
ApplicationError,
EnvironmentConfig,
run_command,
common_environment,
pass_vars,
)
from lib.docker_util import (
docker_exec,
docker_get,
docker_pull,
docker_put,
docker_rm,
docker_run,
)
from lib.cloud import (
get_cloud_providers,
)
def delegate(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:rtype: bool
"""
if isinstance(args, TestConfig):
with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=os.getcwd()) as metadata_fd:
args.metadata_path = os.path.basename(metadata_fd.name)
args.metadata.to_file(args.metadata_path)
try:
return delegate_command(args, exclude, require)
finally:
args.metadata_path = None
else:
return delegate_command(args, exclude, require)
def delegate_command(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:rtype: bool
"""
if args.tox:
delegate_tox(args, exclude, require)
return True
if args.docker:
delegate_docker(args, exclude, require)
return True
if args.remote:
delegate_remote(args, exclude, require)
return True
return False
def delegate_tox(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
"""
if args.python:
versions = args.python,
if args.python not in SUPPORTED_PYTHON_VERSIONS:
raise ApplicationError('tox does not support Python version %s' % args.python)
else:
versions = SUPPORTED_PYTHON_VERSIONS
options = {
'--tox': args.tox_args,
'--tox-sitepackages': 0,
}
for version in versions:
tox = ['tox', '-c', 'test/runner/tox.ini', '-e', 'py' + version.replace('.', '')]
if args.tox_sitepackages:
tox.append('--sitepackages')
tox.append('--')
cmd = generate_command(args, os.path.abspath('test/runner/test.py'), options, exclude, require)
if not args.python:
cmd += ['--python', version]
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'tox-%s' % version]
env = common_environment()
# temporary solution to permit ansible-test delegated to tox to provision remote resources
optional = (
'SHIPPABLE',
'SHIPPABLE_BUILD_ID',
'SHIPPABLE_JOB_NUMBER',
)
env.update(pass_vars(required=[], optional=optional))
run_command(args, tox + cmd, env=env)
def delegate_docker(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
"""
util_image = args.docker_util
test_image = args.docker
privileged = args.docker_privileged
if util_image:
docker_pull(args, util_image)
docker_pull(args, test_image)
util_id = None
test_id = None
options = {
'--docker': 1,
'--docker-privileged': 0,
'--docker-util': 1,
}
cmd = generate_command(args, '/root/ansible/test/runner/test.py', options, exclude, require)
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
image_label = re.sub('^ansible/ansible:', '', args.docker)
image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
cmd += ['--coverage-label', 'docker-%s' % image_label]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
cmd_options = []
if isinstance(args, ShellConfig):
cmd_options.append('-it')
with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
try:
if not args.explain:
lib.pytar.create_tarfile(local_source_fd.name, '.', lib.pytar.ignore)
if util_image:
util_options = [
'--detach',
]
util_id, _ = docker_run(args, util_image, options=util_options)
if args.explain:
util_id = 'util_id'
else:
util_id = util_id.strip()
else:
util_id = None
test_options = [
'--detach',
'--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
'--privileged=%s' % str(privileged).lower(),
]
if util_id:
test_options += [
'--link', '%s:ansible.http.tests' % util_id,
'--link', '%s:sni1.ansible.http.tests' % util_id,
'--link', '%s:sni2.ansible.http.tests' % util_id,
'--link', '%s:fail.ansible.http.tests' % util_id,
'--env', 'HTTPTESTER=1',
]
if isinstance(args, TestConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
test_options += cloud_platform.get_docker_run_options()
test_id, _ = docker_run(args, test_image, options=test_options)
if args.explain:
test_id = 'test_id'
else:
test_id = test_id.strip()
# write temporary files to /root since /tmp isn't ready immediately on container start
docker_put(args, test_id, 'test/runner/setup/docker.sh', '/root/docker.sh')
docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh'])
docker_put(args, test_id, local_source_fd.name, '/root/ansible.tgz')
docker_exec(args, test_id, ['mkdir', '/root/ansible'])
docker_exec(args, test_id, ['tar', 'oxzf', '/root/ansible.tgz', '-C', '/root/ansible'])
# docker images are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
try:
docker_exec(args, test_id, cmd, options=cmd_options)
finally:
with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
docker_exec(args, test_id, ['tar', 'czf', '/root/results.tgz', '-C', '/root/ansible/test', 'results'])
docker_get(args, test_id, '/root/results.tgz', local_result_fd.name)
run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', 'test'])
finally:
if util_id:
docker_rm(args, util_id)
if test_id:
docker_rm(args, test_id)
def delegate_remote(args, exclude, require):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
version = parts[1]
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage)
success = False
try:
core_ci.start()
core_ci.wait()
options = {
'--remote': 1,
}
cmd = generate_command(args, 'ansible/test/runner/test.py', options, exclude, require)
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
manage = ManagePosixCI(core_ci)
manage.setup()
ssh_options = []
if isinstance(args, TestConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
ssh_options += cloud_platform.get_remote_ssh_options()
try:
manage.ssh(cmd, ssh_options)
success = True
finally:
manage.ssh('rm -rf /tmp/results && cp -a ansible/test/results /tmp/results')
manage.download('/tmp/results', 'test')
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
def generate_command(args, path, options, exclude, require):
"""
:type args: EnvironmentConfig
:type path: str
:type options: dict[str, int]
:type exclude: list[str]
:type require: list[str]
:rtype: list[str]
"""
options['--color'] = 1
cmd = [path]
cmd += list(filter_options(args, sys.argv[1:], options, exclude, require))
cmd += ['--color', 'yes' if args.color else 'no']
if args.requirements:
cmd += ['--requirements']
if isinstance(args, ShellConfig):
cmd = create_shell_command(cmd)
elif isinstance(args, SanityConfig):
if args.base_branch:
cmd += ['--base-branch', args.base_branch]
return cmd
def filter_options(args, argv, options, exclude, require):
"""
:type args: EnvironmentConfig
:type argv: list[str]
:type options: dict[str, int]
:type exclude: list[str]
:type require: list[str]
:rtype: collections.Iterable[str]
"""
options = options.copy()
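    # each option name maps to the number of values it consumes, so the loop
    # below can drop the option together with its arguments when rebuilding
    # the delegated command line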
options['--requirements'] = 0
if isinstance(args, TestConfig):
options.update({
'--changed': 0,
'--tracked': 0,
'--untracked': 0,
'--ignore-committed': 0,
'--ignore-staged': 0,
'--ignore-unstaged': 0,
'--changed-from': 1,
'--changed-path': 1,
'--metadata': 1,
})
elif isinstance(args, SanityConfig):
options.update({
'--base-branch': 1,
})
remaining = 0
for arg in argv:
if not arg.startswith('-') and remaining:
remaining -= 1
continue
remaining = 0
parts = arg.split('=', 1)
key = parts[0]
if key in options:
remaining = options[key] - len(parts) + 1
continue
yield arg
for target in exclude:
yield '--exclude'
yield target
for target in require:
yield '--require'
yield target
if isinstance(args, TestConfig):
if args.metadata_path:
yield '--metadata'
yield args.metadata_path
|
hubert667/AIR
|
refs/heads/master
|
build/billiard/billiard/dummy/connection.py
|
9
|
#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from __future__ import absolute_import
__all__ = ['Client', 'Listener', 'Pipe']
from billiard.five import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
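# A minimal usage sketch (not part of the original module), illustrating the
# queue-backed analogue of multiprocessing.connection: Pipe() wires two
# Connection objects to a pair of Queues, so send()/recv() are put()/get().
def _example_pipe_roundtrip():
    a, b = Pipe()
    a.send('ping')
    assert b.recv() == 'ping'
    b.send('pong')
    assert a.recv() == 'pong'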
|
slawosz/content-slice
|
refs/heads/master
|
public/javascripts/fckeditor/_samples/py/sampleposteddata.py
|
22
|
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os
# Tell the browser to render html
print "Content-Type: text/html"
print ""
try:
# Create a cgi object
form = cgi.FieldStorage()
except Exception, e:
print e
# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""
# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table border="1" cellspacing="0" id="outputSample">
<colgroup><col width="80"><col></colgroup>
<thead>
<tr>
<th>Field Name</th>
<th>Value</th>
</tr>
</thead>
"""
for key in form.keys():
try:
value = form[key].value
print """
<tr>
<th>%s</th>
<td><pre>%s</pre></td>
</tr>
""" % (key, value)
except Exception, e:
print e
print "</table>"
# For testing your environments
print "<hr>"
for key in os.environ.keys():
print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"
# Document footer
print """
</body>
</html>
"""
|
shepdelacreme/ansible
|
refs/heads/devel
|
test/units/modules/network/ironware/test_ironware_config.py
|
30
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.ironware import ironware_config
from .ironware_module import TestIronwareModule, load_fixture
from units.modules.utils import set_module_args
class TestIronwareConfigModule(TestIronwareModule):
module = ironware_config
def setUp(self):
super(TestIronwareConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.ironware.ironware_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.ironware.ironware_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.ironware.ironware_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestIronwareConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
config_file = 'ironware_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def execute_module(self, failed=False, changed=False, updates=None, sort=True, defaults=False):
self.load_fixtures(updates)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if updates is not None:
if sort:
self.assertEqual(sorted(updates), sorted(result['updates']), result['updates'])
else:
self.assertEqual(updates, result['updates'], result['updates'])
return result
def test_ironware_config_unchanged(self):
src = load_fixture('ironware_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_ironware_config_src(self):
src = load_fixture('ironware_config_src.cfg')
set_module_args(dict(src=src))
updates = ['hostname foo', 'interface ethernet 1/1',
'no ip address']
self.execute_module(changed=True, updates=updates)
def test_ironware_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_ironware_config_save_always(self):
self.run_commands.return_value = "hostname foobar"
set_module_args(dict(save_when='always'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
def test_ironware_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foobar']))
updates = ['hostname foobar']
self.execute_module(changed=True, updates=updates)
def test_ironware_config_lines_w_parents(self):
set_module_args(dict(lines=['disable'], parents=['interface ethernet 1/1']))
updates = ['interface ethernet 1/1', 'disable']
self.execute_module(changed=True, updates=updates)
def test_ironware_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
updates = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, updates=updates, sort=False)
def test_ironware_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
updates = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, updates=updates, sort=False)
def test_ironware_config_before_after_no_change(self):
set_module_args(dict(lines=['hostname router'],
before=['test1', 'test2'],
after=['test3', 'test4']))
self.execute_module()
def test_ironware_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
updates = ['hostname router']
self.execute_module(changed=True, updates=updates)
def test_ironware_config_replace_block(self):
lines = ['port-name test string', 'test string']
parents = ['interface ethernet 1/1']
set_module_args(dict(lines=lines, replace='block', parents=parents))
updates = parents + lines
self.execute_module(changed=True, updates=updates)
def test_ironware_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, updates=lines)
    def test_ironware_config_match_none_parents(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'port-name test string']
parents = ['interface ethernet 1/1']
set_module_args(dict(lines=lines, parents=parents, match='none'))
updates = parents + lines
self.execute_module(changed=True, updates=updates, sort=False)
def test_ironware_config_match_strict(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'port-name test string',
'disable']
parents = ['interface ethernet 1/1']
set_module_args(dict(lines=lines, parents=parents, match='strict'))
updates = parents + ['disable']
self.execute_module(changed=True, updates=updates, sort=False)
def test_ironware_config_match_exact(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'port-name test string',
'disable']
parents = ['interface ethernet 1/1']
set_module_args(dict(lines=lines, parents=parents, match='exact'))
updates = parents + lines
self.execute_module(changed=True, updates=updates, sort=False)
|
kirananto/android_kernel_motorola_msm8916
|
refs/heads/aosp-5.1.1
|
scripts/gcc-wrapper.py
|
580
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:63",
"kprobes.c:1493",
"rcutree.c:1614",
"af_unix.c:893",
"nl80211.c:58",
"jhash.h:137",
"cmpxchg.h:162",
"ping.c:87",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
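# For illustration (hypothetical warning text): a line such as
# "drivers/foo/bar.c:42:7: warning: unused variable 'x'"
# matches warning_re with group(2) == "bar.c:42", which is the key compared
# against the allowed_warnings whitelist above.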
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
imsparsh/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/python/test/__init__.py
|
142
|
"""
Unit tests for L{twisted.python}.
"""
|
webmasterraj/FogOrNot
|
refs/heads/master
|
flask/lib/python2.7/site-packages/flask/testsuite/test_apps/moduleapp/apps/frontend/__init__.py
|
628
|
from flask import Module, render_template
frontend = Module(__name__)
@frontend.route('/')
def index():
return render_template('frontend/index.html')
|
arjunasuresh3/Mypykoans
|
refs/heads/master
|
python 3/libs/mock.py
|
38
|
# mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2009 Michael Foord
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 0.6.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'patch',
'patch_object',
'sentinel',
'DEFAULT'
)
__version__ = '0.6.0 modified by Greg Malcolm'
class SentinelObject(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return '<SentinelObject "{0!s}">'.format(self.name)
class Sentinel(object):
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
return self._sentinels.setdefault(name, SentinelObject(name))
sentinel = Sentinel()
DEFAULT = sentinel.DEFAULT
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _is_magic(name):
return '__{0!s}__'.format(name[2:-2]) == name
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
class Mock(object):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
name=None, parent=None, wraps=None):
self._parent = parent
self._name = name
if spec is not None and not isinstance(spec, list):
spec = [member for member in dir(spec) if not _is_magic(member)]
self._methods = spec
self._children = {}
self._return_value = return_value
self.side_effect = side_effect
self._wraps = wraps
self.reset_mock()
def reset_mock(self):
self.called = False
self.call_args = None
self.call_count = 0
self.call_args_list = []
self.method_calls = []
for child in self._children.values():
child.reset_mock()
if isinstance(self._return_value, Mock):
self._return_value.reset_mock()
def __get_return_value(self):
if self._return_value is DEFAULT:
self._return_value = Mock()
return self._return_value
def __set_return_value(self, value):
self._return_value = value
return_value = property(__get_return_value, __set_return_value)
def __call__(self, *args, **kwargs):
self.called = True
self.call_count += 1
self.call_args = (args, kwargs)
self.call_args_list.append((args, kwargs))
parent = self._parent
name = self._name
while parent is not None:
parent.method_calls.append((name, args, kwargs))
if parent._parent is None:
break
name = parent._name + '.' + name
parent = parent._parent
ret_val = DEFAULT
if self.side_effect is not None:
if (isinstance(self.side_effect, Exception) or
isinstance(self.side_effect, (type, ClassType)) and
issubclass(self.side_effect, Exception)):
raise self.side_effect
ret_val = self.side_effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if self._wraps is not None and self._return_value is DEFAULT:
return self._wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
def __getattr__(self, name):
if self._methods is not None:
if name not in self._methods:
raise AttributeError("Mock object has no attribute '{0!s}'".format(name))
elif _is_magic(name):
raise AttributeError(name)
if name not in self._children:
wraps = None
if self._wraps is not None:
wraps = getattr(self._wraps, name)
self._children[name] = Mock(parent=self, name=name, wraps=wraps)
return self._children[name]
def assert_called_with(self, *args, **kwargs):
assert self.call_args == (args, kwargs), 'Expected: {0!s}\nCalled with: {1!s}'.format((args, kwargs), self.call_args)
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".{0!s}".format(comp)
thing = _dot_lookup(thing, comp, import_path)
return thing
class _patch(object):
def __init__(self, target, attribute, new, spec, create):
self.target = target
self.attribute = attribute
self.new = new
self.spec = spec
self.create = create
self.has_local = False
def __call__(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
def patched(*args, **keywargs):
            # don't use a with here (backwards compatibility with 2.5)
extra_args = []
for patching in patched.patchings:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
try:
return func(*args, **keywargs)
finally:
for patching in getattr(patched, 'patchings', []):
patching.__exit__()
patched.patchings = [self]
patched.__name__ = func.__name__
patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
func.func_code.co_firstlineno)
return patched
def get_original(self):
target = self.target
name = self.attribute
create = self.create
original = DEFAULT
if _has_local_attr(target, name):
try:
original = target.__dict__[name]
except AttributeError:
# for instances of classes with slots, they have no __dict__
original = getattr(target, name)
elif not create and not hasattr(target, name):
raise AttributeError("{0!s} does not have the attribute {1!r}".format(target, name))
return original
def __enter__(self):
new, spec, = self.new, self.spec
original = self.get_original()
if new is DEFAULT:
# XXXX what if original is DEFAULT - shouldn't use it as a spec
inherit = False
if spec == True:
# set spec to the object we are replacing
spec = original
if isinstance(spec, (type, ClassType)):
inherit = True
new = Mock(spec=spec)
if inherit:
new.return_value = Mock(spec=spec)
self.temp_original = original
setattr(self.target, self.attribute, new)
return new
def __exit__(self, *_):
if self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
del self.temp_original
def patch_object(target, attribute, new=DEFAULT, spec=None, create=False):
return _patch(target, attribute, new, spec, create)
def patch(target, new=DEFAULT, spec=None, create=False):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: {0!r}".format(target,))
target = _importer(target)
return _patch(target, attribute, new, spec, create)
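# Brief usage note (not part of the original module): patch() works both as a
# decorator, where _patch.__call__ wraps the test function and passes the new
# Mock in as an extra argument, and as a context manager via __enter__/__exit__.
# Hypothetical target shown below:
#
#   @patch('some_module.some_attr')
#   def test_something(mock_attr):
#       mock_attr.return_value = 42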
def _has_local_attr(obj, name):
try:
return name in vars(obj)
except TypeError:
# objects without a __dict__
return hasattr(obj, name)
|
gangadhar-kadam/verve-erp
|
refs/heads/v5.0
|
erpnext/selling/report/lead_details/__init__.py
|
12133432
| |
sophron/wifiphisher
|
refs/heads/master
|
wifiphisher/common/__init__.py
|
12133432
| |
fabian4/trove
|
refs/heads/master
|
trove/tests/unittests/common/test_pagination.py
|
2
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from trove.common import pagination
from trove.tests.unittests import trove_testtools
class TestPaginatedDataView(trove_testtools.TestCase):
def test_creation_with_string_marker(self):
view = pagination.PaginatedDataView("TestType", [],
"http://current_page",
next_page_marker="marker")
self.assertEqual("marker", view.next_page_marker)
def test_creation_with_none_marker(self):
view = pagination.PaginatedDataView("TestType", [],
"http://current_page",
next_page_marker=None)
self.assertIsNone(view.next_page_marker)
def test_creation_with_none_string_marker(self):
view = pagination.PaginatedDataView("TestType", [],
"http://current_page",
next_page_marker=52)
self.assertEqual("52", view.next_page_marker)
def _do_paginate_list(self, limit=None, marker=None, include_marker=False):
li = ['a', 'b', 'c', 'd', 'e']
return pagination.paginate_list(li, limit, marker, include_marker)
def test_paginate_list(self):
# start list
li_1, marker_1 = self._do_paginate_list(limit=2)
self.assertEqual(['a', 'b'], li_1)
self.assertEqual('c', marker_1)
# continue list, do not include marker in result
li_2, marker_2 = self._do_paginate_list(limit=2, marker=marker_1)
self.assertEqual(['d', 'e'], li_2)
self.assertEqual(None, marker_2)
# alternate continue list, include marker in result
li_3, marker_3 = self._do_paginate_list(limit=2, marker=marker_1,
include_marker=True)
self.assertEqual(['c', 'd'], li_3)
self.assertEqual('e', marker_3)
# bad marker
li_4, marker_4 = self._do_paginate_list(marker='f')
self.assertEqual([], li_4)
self.assertEqual(None, marker_4)
|
wdzhou/mantid
|
refs/heads/master
|
Framework/PythonInterface/test/python/plugins/algorithms/PoldiCreatePeaksFromFileTest.py
|
3
|
# pylint: disable=no-init,invalid-name,too-many-public-methods
from __future__ import (absolute_import, division, print_function)
import unittest
from testhelpers import assertRaisesNothing
from testhelpers.tempfile_wrapper import TemporaryFileHelper
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
class PoldiCreatePeaksFromFileTest(unittest.TestCase):
testname = None
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def test_Init(self):
assertRaisesNothing(self, AlgorithmManager.create, ("PoldiCreatePeaksFromFile"))
def test_FileOneCompoundOneAtom(self):
fileHelper = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: F d -3 m
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
ws = PoldiCreatePeaksFromFile(fileHelper.getName(), 0.7, 10.0)
# Check output GroupWorkspace
self.assertEqual(ws.getNumberOfEntries(), 1)
self.assertTrue(ws.contains("Silicon"))
        # Check that the output is identical to what's expected
ws_expected = PoldiCreatePeaksFromCell("F d -3 m", "Si 0 0 0 1.0 0.05", a=5.43, LatticeSpacingMin=0.7)
si_ws = AnalysisDataService.retrieve("Silicon")
self._tablesAreEqual(si_ws, ws_expected)
# Clean up
self._cleanWorkspaces([ws, ws_expected])
def test_FileOneCompoundTwoAtoms(self):
# It's the same structure and the same reflections, just the structure factors are different
fileHelper = TemporaryFileHelper("""SiliconCarbon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: F d -3 m
Atoms: {
Si 0 0 0 0.9 0.05
C 0 0 0 0.1 0.05
}
# Comment
}""")
ws = PoldiCreatePeaksFromFile(fileHelper.getName(), 0.7, 10.0)
self.assertEqual(ws.getNumberOfEntries(), 1)
self.assertTrue(ws.contains("SiliconCarbon"))
ws_expected = PoldiCreatePeaksFromCell("F d -3 m", "Si 0 0 0 0.9 0.05; C 0 0 0 0.1 0.05", a=5.43,
LatticeSpacingMin=0.7)
si_ws = AnalysisDataService.retrieve("SiliconCarbon")
self._tablesAreEqual(si_ws, ws_expected)
# Clean up
self._cleanWorkspaces([ws, ws_expected])
def test_FileTwoCompounds(self):
# Using two imaginary structures to check that two compounds are parsed correctly as well
fileHelper = TemporaryFileHelper("""SiliconCarbon {
Lattice: 5.43 5.43 5.43 90.0 90.0 120.0
Spacegroup: P 63/m m c
Atoms: {
Si 0 0 0 0.9 0.05
C 0 0 0 0.1 0.05
}
}
Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: F d -3 m
Atoms: {
Si 1/2 1/2 0 1.0 0.05
}
}""")
ws = PoldiCreatePeaksFromFile(fileHelper.getName(), 0.7, 10.0)
self.assertEqual(ws.getNumberOfEntries(), 2)
self.assertTrue(ws.contains("SiliconCarbon"))
self.assertTrue(ws.contains("Silicon"))
self._cleanWorkspaces([ws])
def test_FileFaultyLatticeStrings(self):
fhLatticeMissing = TemporaryFileHelper("""Silicon {
Spacegroup: F d -3 m
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
fhNoLattice = TemporaryFileHelper("""Silicon {
Lattice:
Spacegroup: F d -3 m
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
fhInvalidLattice = TemporaryFileHelper("""Silicon {
Lattice: invalid
Spacegroup: F d -3 m
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhLatticeMissing.getName(), 0.7, 10.0, 'ws'))
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhNoLattice.getName(), 0.7, 10.0, 'ws'))
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhInvalidLattice.getName(), 0.7, 10.0, 'ws'))
def test_FileFaultySpaceGroupStrings(self):
fhSgMissing = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
fhSgInvalid = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: invalid
Atoms: {
Si 0 0 0 1.0 0.05
}
}""")
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhSgMissing.getName(), 0.7, 10.0, 'ws'))
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhSgInvalid.getName(), 0.7, 10.0, 'ws'))
def test_FileFaultyAtomStrings(self):
fhAtomsMissing = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: F d -3 m
}""")
fhAtomsNoBraces = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: invalid
Atoms:
Sis 0 0 0 1.0 0.05
}""")
fhAtomsEmpty = TemporaryFileHelper("""Silicon {
Lattice: 5.43 5.43 5.43 90.0 90.0 90.0
Spacegroup: invalid
Atoms: { }
}""")
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhAtomsMissing.getName(), 0.7, 10.0, 'ws'))
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhAtomsNoBraces.getName(), 0.7, 10.0, 'ws'))
self.assertRaises(RuntimeError, PoldiCreatePeaksFromFile, *(fhAtomsEmpty.getName(), 0.7, 10.0, 'ws'))
def _tablesAreEqual(self, lhs, rhs):
self.assertEqual(lhs.rowCount(), rhs.rowCount(), msg="Row count of tables is different")
for r in range(lhs.rowCount()):
self.assertEqual(lhs.row(r), rhs.row(r), "Row " + str(r) + " of tables differ.")
def _cleanWorkspaces(self, wsList):
for ws in wsList:
DeleteWorkspace(ws)
if __name__ == '__main__':
# Only test if algorithm is registered (pyparsing dependency).
if AlgorithmFactory.exists("PoldiCreatePeaksFromFile"):
unittest.main()
|