repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sbellem/bitcoin | qa/rpc-tests/receivedby.py | 140 | 7345 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Find and return the first sub array (a dict) from an array of arrays
    whose key/value pairs all match to_match.

    to_match should be a unique identifier of a sub array.
    Returns [] when nothing matches, so callers can test the result
    with len().
    '''
    for item in object_array:
        # An empty to_match matches the first item, same as before.
        if all(item[key] == value for key, value in to_match.items()):
            return item
    return []
def check_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.

    If the should_not_find flag is true, to_match should not be found in
    object_array, and expected is ignored.

    Raises:
        AssertionError: when a matched item has a wrong expected value,
            when nothing matched and should_not_find is false, or when
            something matched and should_not_find is true.
    """
    if should_not_find:
        expected = {}
    num_matched = 0
    for item in object_array:
        # Skip items that do not match every to_match key/value pair.
        if not all(item[key] == value for key, value in to_match.items()):
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were matched %s" % (str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
    # Functional test for the wallet "received by" RPCs:
    # listreceivedbyaddress, getreceivedbyaddress,
    # listreceivedbyaccount and getreceivedbyaccount.
    # Node 0 funds addresses/accounts owned by node 1, using the network
    # set up by BitcoinTestFramework.

    def run_test(self):
        '''
        listreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        #Check not listed in listreceivedbyaddress because has 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           { },
                           True)

        #Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        check_array_result(self.nodes[1].listreceivedbyaddress(),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})

        #With min confidence < 10
        check_array_result(self.nodes[1].listreceivedbyaddress(5),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})

        #With min confidence > 10, should not find Tx
        check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)

        #Empty Tx
        # Second argument (includeempty=True) lists addresses with no payments.
        addr = self.nodes[1].getnewaddress()
        check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
                           {"address":addr},
                           {"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})

        '''
        getreceivedbyaddress Test
        '''
        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        #Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        #Check balance is 0.1
        # minconf=0 also counts the still-unconfirmed payment.
        balance = self.nodes[1].getreceivedbyaddress(addr,0)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        if balance != Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))

        '''
        listreceivedbyaccount + getreceivedbyaccount Test
        '''
        #set pre-state
        addrArr = self.nodes[1].getnewaddress()
        account = self.nodes[1].getaccount(addrArr)
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")
        # Double assignment: rec_by_accountArr is never read afterwards.
        balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)

        # NOTE(review): this sends to `addr` from the previous section, not to
        # the freshly created `addrArr`; both belong to the same account here
        # (getaccount on a new address), so the account totals still add up —
        # confirm this is intended and not a typo.
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # listreceivedbyaccount should return received_by_account_json because of 0 confirmations
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           received_by_account_json)

        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account:
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))

        self.nodes[1].generate(10)
        self.sync_all()
        # listreceivedbyaccount should return updated account balance
        check_array_result(self.nodes[1].listreceivedbyaccount(),
                           {"account":account},
                           {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})

        # getreceivedbyaddress should return updates balance
        balance = self.nodes[1].getreceivedbyaccount(account)
        if balance != balance_by_account + Decimal("0.1"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))

        #Create a new account named "mynewaccount" that has a 0 balance
        self.nodes[1].getaccountaddress("mynewaccount")
        received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
        if len(received_by_account_json) == 0:
            raise AssertionError("No accounts found in node")

        # Test includeempty of listreceivedbyaccount
        if received_by_account_json["amount"] != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))

        # Test getreceivedbyaccount for 0 amount accounts
        balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
        if balance != Decimal("0.0"):
            raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))

if __name__ == '__main__':
    ReceivedByTest().main()
| mit |
dayongxie/mod-pbxproj | pbxproj/XcodeProject.py | 2 | 2206 | import shutil
import datetime
from pbxproj.pbxextensions import *
class XcodeProject(PBXGenericObject, ProjectFiles, ProjectFlags, ProjectGroups):
    """
    Entry point for working with a .pbxproj file: create, load, save and
    back up projects, and query their contents (targets, build phases,
    build files, arbitrary objects by id). Lower-level objects are exposed
    for anything this facade does not cover.
    """

    def __init__(self, tree=None, path=None):
        super(XcodeProject, self).__init__(parent=None)
        if path is None:
            path = os.path.join(os.getcwd(), 'project.pbxproj')
        self._pbxproj_path = os.path.abspath(path)
        # The source root is the parent of the directory holding the
        # project.pbxproj file.
        project_dir = os.path.split(path)[0]
        self._source_root = os.path.abspath(os.path.join(project_dir, '..'))
        # Build the object graph from the parsed plist tree.
        self.parse(tree)

    def save(self, path=None):
        """Serialize the project to *path* (defaults to where it was loaded from)."""
        target = path if path is not None else self._pbxproj_path
        stream = open(target, 'w')
        stream.write(repr(self))
        stream.close()

    def backup(self):
        """Copy the project file to a timestamped .backup file and return its name."""
        timestamp = datetime.datetime.now().strftime('%d%m%y-%H%M%S')
        backup_name = "%s_%s.backup" % (self._pbxproj_path, timestamp)
        shutil.copy2(self._pbxproj_path, backup_name)
        return backup_name

    def __repr__(self):
        # Xcode expects this magic encoding marker on the first line.
        return u'// !$*UTF8*$!\n' + super(XcodeProject, self).__repr__()

    def get_ids(self):
        """Return all object identifiers in the project."""
        return self.objects.get_keys()

    def get_build_phases_by_name(self, phase_name):
        """Return every build phase object in the section named *phase_name*."""
        return self.objects.get_objects_in_section(phase_name)

    def get_build_files_for_file(self, file_id):
        """Return the PBXBuildFile entries whose fileRef is *file_id*."""
        build_files = self.objects.get_objects_in_section(u'PBXBuildFile')
        return [build_file for build_file in build_files
                if build_file.fileRef == file_id]

    def get_target_by_name(self, name):
        """Return the first target called *name*, or None when absent."""
        targets = self.objects.get_targets(name)
        if len(targets) > 0:
            return targets[0]
        return None

    def get_object(self, object_id):
        """Look up a single project object by its identifier."""
        return self.objects[object_id]

    @classmethod
    def load(cls, path):
        """Parse the pbxproj file at *path* and return an XcodeProject."""
        import openstep_parser as osp
        tree = osp.OpenStepDecoder.ParseFromFile(open(path, 'r'))
        return XcodeProject(tree, path)
| bsd-3-clause |
freeflightsim/ffs-app-engine | _freeflightsim.appspot.com.1/gdata/test_config.py | 2 | 14876 | #!/usr/bin/env python
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import getpass
import inspect
import atom.mock_http_core
import gdata.gauth
"""Loads configuration for tests which connect to Google servers.
Settings used in tests are stored in a ConfigCollection instance in this
module called options. If your test needs to get a test related setting,
use
import gdata.test_config
option_value = gdata.test_config.options.get_value('x')
The above will check the command line for an '--x' argument, and if not
found will either use the default value for 'x' or prompt the user to enter
one.
Your test can override the value specified by the user by performing:
gdata.test_config.options.set_value('x', 'y')
If your test uses a new option which you would like to allow the user to
specify on the command line or via a prompt, you can use the register_option
method as follows:
gdata.test_config.options.register(
'option_name', 'Prompt shown to the user', secret=False #As for password.
'This is the description of the option, shown when help is requested.',
'default value, provide only if you do not want the user to be prompted')
"""
class Option(object):
    """One test setting, resolvable from argv, a user prompt, or a default."""

    def __init__(self, name, prompt, secret=False, description=None, default=None):
        # name: flag name, looked up on the command line as --name.
        # prompt: text shown when asking the user for a value.
        # secret: when True, read the value with getpass (no terminal echo).
        # description: help text used by ConfigCollection.render_usage.
        # default: value used when the flag is absent; None means the user
        #     will be prompted interactively.
        self.name = name
        self.prompt = prompt
        self.secret = secret
        self.description = description
        self.default = default

    def get(self):
        """Return the option's value, preferring argv, then prompting.

        Supports both '--name=value' and '--name value' forms.
        (Python 2 only: uses xrange, the print statement and raw_input.)
        """
        value = self.default
        # Check for a command line parameter.
        for i in xrange(len(sys.argv)):
            if sys.argv[i].startswith('--%s=' % self.name):
                value = sys.argv[i].split('=')[1]
            elif sys.argv[i] == '--%s' % self.name:
                # Space-separated form: the value is the next argv token.
                value = sys.argv[i + 1]
        # If the param was not on the command line, ask the user to input the
        # value.
        # In order for this to prompt the user, the default value for the option
        # must be None.
        if value is None:
            prompt = '%s: ' % self.prompt
            if self.secret:
                value = getpass.getpass(prompt)
            else:
                print 'You can specify this on the command line using --%s' % self.name
                value = raw_input(prompt)
        return value
class ConfigCollection(object):
    """Registry of Option objects plus a cache of already-resolved values."""

    def __init__(self, options=None):
        # options: optional dict mapping option name -> Option.
        self.options = options or {}
        # values caches resolved settings so each option is fetched (and the
        # user prompted) at most once per run.
        self.values = {}

    def register_option(self, option):
        self.options[option.name] = option

    def register(self, *args, **kwargs):
        # Convenience wrapper: builds the Option from the given arguments.
        self.register_option(Option(*args, **kwargs))

    def get_value(self, option_name):
        """Return the cached value, or resolve it via Option.get and cache it."""
        if option_name in self.values:
            return self.values[option_name]
        value = self.options[option_name].get()
        # A None result is deliberately not cached, so it will be re-resolved.
        if value is not None:
            self.values[option_name] = value
        return value

    def set_value(self, option_name, value):
        # Explicit override; takes precedence over argv and prompting.
        self.values[option_name] = value

    def render_usage(self):
        """Return a '--name: description' help line per registered option.

        (Python 2 only: uses dict.iteritems.)
        """
        message_parts = []
        for opt_name, option in self.options.iteritems():
            message_parts.append('--%s: %s' % (opt_name, option.description))
        return '\n'.join(message_parts)
# Module-level singleton holding every test setting; tests import this.
options = ConfigCollection()

# Register the default options.
options.register(
    'username',
    'Please enter the email address of your test account',
    description=('The email address you want to sign in with. '
                 'Make sure this is a test account as these tests may edit'
                 ' or delete data.'))
options.register(
    'password',
    'Please enter the password for your test account',
    secret=True, description='The test accounts password.')
options.register(
    'clearcache',
    'Delete cached data? (enter true or false)',
    description=('If set to true, any temporary files which cache test'
                 ' requests and responses will be deleted.'),
    default='true')
options.register(
    'savecache',
    'Save requests and responses in a temporary file? (enter true or false)',
    description=('If set to true, requests to the server and responses will'
                 ' be saved in temporary files.'),
    default='false')
options.register(
    'runlive',
    'Run the live tests which contact the server? (enter true or false)',
    description=('If set to true, the tests will make real HTTP requests to'
                 ' the servers. This slows down test execution and may'
                 ' modify the users data, be sure to use a test account.'),
    default='true')
options.register(
    'ssl',
    'Run the live tests over SSL (enter true or false)',
    description='If set to true, all tests will be performed over HTTPS (SSL)',
    default='false')

# Other options which may be used if needed.
# These Option instances are not registered on `options` here; test modules
# that need them register them individually.
BLOG_ID_OPTION = Option(
    'blogid',
    'Please enter the ID of your test blog',
    description=('The blog ID for the blog which should have test posts added'
                 ' to it. Example 7682659670455539811'))
TEST_IMAGE_LOCATION_OPTION = Option(
    'imgpath',
    'Please enter the full path to a test image to upload',
    description=('This test image will be uploaded to a service which'
                 ' accepts a media file, it must be a jpeg.'))
SPREADSHEET_ID_OPTION = Option(
    'spreadsheetid',
    'Please enter the ID of a spreadsheet to use in these tests',
    description=('The spreadsheet ID for the spreadsheet which should be'
                 ' modified by theses tests.'))
DOMAIN_OPTION = Option(
    'domain',
    'Please enter your Google Apps domain',
    description=('The domain the Google Apps is hosted on or leave blank'
                 ' if n/a'))
SITES_NAME_OPTION = Option(
    'sitename',
    'Please enter name of your Google Site',
    description='The webspace name of the Site found in its URL.')
PROJECT_NAME_OPTION = Option(
    'project_name',
    'Please enter the name of your project hosting project',
    description=('The name of the project which should have test issues added'
                 ' to it. Example gdata-python-client'))
ISSUE_ASSIGNEE_OPTION = Option(
    'issue_assignee',
    'Enter the email address of the target owner of the updated issue.',
    description=('The email address of the user a created issue\'s owner will '
                 ' become. Example testuser2@gmail.com'))
GA_TABLE_ID = Option(
    'table_id',
    'Enter the Table ID of the Google Analytics profile to test',
    description=('The Table ID of the Google Analytics profile to test.'
                 ' Example ga:1174'))
# Functions to inject a cachable HTTP client into a service client.
def configure_client(client, case_name, service_name):
    """Sets up a mock client which will reuse a saved session.

    Should be called during setUp of each unit test.

    Handles authentication to allow the GDClient to make requests which
    require an auth header.

    Args:
      client: a gdata.GDClient whose http_client member should be replaced
              with a atom.mock_http_core.MockHttpClient so that repeated
              executions can used cached responses instead of contacting
              the server.
      case_name: str The name of the test case class. Examples: 'BloggerTest',
                 'ContactsTest'. Used to save a session
                 for the ClientLogin auth token request, so the case_name
                 should be reused if and only if the same username, password,
                 and service are being used.
      service_name: str The service name as used for ClientLogin to identify
                    the Google Data API being accessed. Example: 'blogger',
                    'wise', etc.
    """
    # Use a mock HTTP client which will record and replay the HTTP traffic
    # from these tests.
    client.http_client = atom.mock_http_core.MockHttpClient()
    client.http_client.cache_case_name = case_name
    # Getting the auth token only needs to be done once in the course of test
    # runs.
    auth_token_key = '%s_auth_token' % service_name
    if (auth_token_key not in options.values
        and options.get_value('runlive') == 'true'):
        # Record (or replay) the ClientLogin exchange under its own session.
        client.http_client.cache_test_name = 'client_login'
        cache_name = client.http_client.get_cache_file_name()
        if options.get_value('clearcache') == 'true':
            client.http_client.delete_session(cache_name)
        client.http_client.use_cached_session(cache_name)
        auth_token = client.request_client_login_token(
            options.get_value('username'), options.get_value('password'),
            case_name, service=service_name)
        # Store the token serialized so later tests can rebuild it without
        # another round trip to ClientLogin.
        options.values[auth_token_key] = gdata.gauth.token_to_blob(auth_token)
        client.http_client.close_session()
    # Allow a config auth_token of False to prevent the client's auth header
    # from being modified.
    if auth_token_key in options.values:
        client.auth_token = gdata.gauth.token_from_blob(
            options.values[auth_token_key])
def configure_cache(client, test_name):
    """Load or begin a cached session recording HTTP traffic for one test.

    Call at the start of each test method. *test_name* (for example
    'TestClass.test_x_works') names the recording, so it must be unique per
    test method within the test case.
    """
    # Auth is already handled by configure_client during setUp.
    http = client.http_client
    http.cache_test_name = test_name
    session_file = http.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
        http.delete_session(session_file)
    http.use_cached_session(session_file)
def close_client(client):
    """Persist the recorded responses to a temp file if config allows.

    Intended for a unit test's tearDown. Only saves when the 'savecache'
    option is 'true', so sessions are kept for replay only on request.
    """
    if not client:
        return
    if options.get_value('savecache') != 'true':
        return
    # This was a live request; write the recording out.
    client.http_client.close_session()
def configure_service(service, case_name, service_name):
    """Sets up a mock GDataService v1 client to reuse recorded sessions.

    Should be called during setUp of each unit test. This is a duplicate of
    configure_client, modified to handle old v1 service classes.
    """
    # v1 services expose their mockable transport one level deeper, on
    # http_client.v2_http_client.
    service.http_client.v2_http_client = atom.mock_http_core.MockHttpClient()
    service.http_client.v2_http_client.cache_case_name = case_name
    # Getting the auth token only needs to be done once in the course of test
    # runs.
    auth_token_key = 'service_%s_auth_token' % service_name
    if (auth_token_key not in options.values
        and options.get_value('runlive') == 'true'):
        # Record (or replay) the ClientLogin exchange under its own session.
        service.http_client.v2_http_client.cache_test_name = 'client_login'
        cache_name = service.http_client.v2_http_client.get_cache_file_name()
        if options.get_value('clearcache') == 'true':
            service.http_client.v2_http_client.delete_session(cache_name)
        service.http_client.v2_http_client.use_cached_session(cache_name)
        service.ClientLogin(options.get_value('username'),
                            options.get_value('password'),
                            service=service_name, source=case_name)
        # v1 services store the token as an opaque string, not a blob.
        options.values[auth_token_key] = service.GetClientLoginToken()
        service.http_client.v2_http_client.close_session()
    if auth_token_key in options.values:
        service.SetClientLoginToken(options.values[auth_token_key])
def configure_service_cache(service, test_name):
    """Load or start a session recording for a v1 Service object.

    Same behavior as configure_cache, but operating on the v2 transport
    nested inside a v1 Service.
    """
    transport = service.http_client.v2_http_client
    transport.cache_test_name = test_name
    session_file = transport.get_cache_file_name()
    if options.get_value('clearcache') == 'true':
        transport.delete_session(session_file)
    transport.use_cached_session(session_file)
def close_service(service):
    """v1-Service counterpart of close_client: save the recording if allowed."""
    should_save = bool(service) and options.get_value('savecache') == 'true'
    if should_save:
        # This was a live request; write the recording out.
        service.http_client.v2_http_client.close_session()
def build_suite(classes):
"""Creates a TestSuite for all unit test classes in the list.
Assumes that each of the classes in the list has unit test methods which
begin with 'test'. Calls unittest.makeSuite.
Returns:
A new unittest.TestSuite containing a test suite for all classes.
"""
suites = [unittest.makeSuite(a_class, 'test') for a_class in classes]
return unittest.TestSuite(suites)
def check_data_classes(test, classes):
    """Asserts structural conventions on gdata XML data classes.

    For each class: requires a docstring; when a _qname is present (possibly
    a per-version tuple) checks it is a string and not a bare namespace; and
    verifies that each public class attribute is a string, function, method,
    property, XmlElement subclass, or list of XmlElement subclasses.
    (Python 2 only: uses dict.iteritems.)

    Args:
      test: a unittest.TestCase used for its assert_/fail methods.
      classes: iterable of data classes to validate.
    """
    import inspect
    for data_class in classes:
        test.assert_(data_class.__doc__ is not None,
                     'The class %s should have a docstring' % data_class)
        if hasattr(data_class, '_qname'):
            # _qname may be a single string or a tuple of per-version names.
            qname_versions = None
            if isinstance(data_class._qname, tuple):
                qname_versions = data_class._qname
            else:
                qname_versions = (data_class._qname,)
            for versioned_qname in qname_versions:
                test.assert_(isinstance(versioned_qname, str),
                             'The class %s has a non-string _qname' % data_class)
                # A qname ending in '}' would be only the '{namespace}' part.
                test.assert_(not versioned_qname.endswith('}'),
                             'The _qname for class %s is only a namespace' % (
                                 data_class))
        for attribute_name, value in data_class.__dict__.iteritems():
            # Ignore all elements that start with _ (private members)
            if not attribute_name.startswith('_'):
                try:
                    if not (isinstance(value, str) or inspect.isfunction(value)
                            or (isinstance(value, list)
                                and issubclass(value[0], atom.core.XmlElement))
                            or type(value) == property  # Allow properties.
                            or inspect.ismethod(value)  # Allow methods.
                            or issubclass(value, atom.core.XmlElement)):
                        test.fail(
                            'XmlElement member should have an attribute, XML class,'
                            ' or list of XML classes as attributes.')
                except TypeError:
                    # issubclass raises TypeError for non-class values; report
                    # the offending attribute and its type instead.
                    test.fail('Element %s in %s was of type %s' % (
                        attribute_name, data_class._qname, type(value)))
| gpl-2.0 |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/ctypes/test/test_internals.py | 109 | 2623 | # This tests the internal _objects attribute
import unittest
from ctypes import *
from sys import getrefcount as grc
# XXX This test must be reviewed for correctness!!!
"""
ctypes' types are container types.
They have an internal memory block, which only consists of some bytes,
but it has to keep references to other objects as well. This is not
really needed for trivial C types like int or char, but it is important
for aggregate types like strings or pointers in particular.
What about pointers?
"""
class ObjectsTestCase(unittest.TestCase):
    # Inspects the private _objects attribute ctypes instances use to keep
    # referenced Python objects alive alongside their raw memory block.
    # (Python 2 only: c_char_p is built from str literals, and getrefcount
    # deltas are asserted around that.)

    def assertSame(self, a, b):
        # Identity assertion: a and b must be the very same object.
        self.assertEqual(id(a), id(b))

    def test_ints(self):
        # Plain c_int keeps no Python references: _objects stays None and
        # the source int's refcount is unchanged.
        i = 42000123
        refcnt = grc(i)
        ci = c_int(i)
        self.assertEqual(refcnt, grc(i))
        self.assertEqual(ci._objects, None)

    def test_c_char_p(self):
        # c_char_p keeps the source string alive: refcount goes up by one
        # and _objects is that very string object.
        s = "Hello, World"
        refcnt = grc(s)
        cs = c_char_p(s)
        self.assertEqual(refcnt + 1, grc(s))
        self.assertSame(cs._objects, s)

    def test_simple_struct(self):
        # Trivial C fields (ints) never populate _objects.
        class X(Structure):
            _fields_ = [("a", c_int), ("b", c_int)]
        a = 421234
        b = 421235
        x = X()
        self.assertEqual(x._objects, None)
        x.a = a
        x.b = b
        self.assertEqual(x._objects, None)

    def test_embedded_structs(self):
        # Assigning sub-structures records empty per-field dicts keyed by
        # field index ("0", "1"); later int assignments do not change that.
        class X(Structure):
            _fields_ = [("a", c_int), ("b", c_int)]
        class Y(Structure):
            _fields_ = [("x", X), ("y", X)]
        y = Y()
        self.assertEqual(y._objects, None)
        x1, x2 = X(), X()
        y.x, y.y = x1, x2
        self.assertEqual(y._objects, {"0": {}, "1": {}})
        x1.a, x2.b = 42, 93
        self.assertEqual(y._objects, {"0": {}, "1": {}})

    def test_xxx(self):
        # String pointer fields are tracked per field index, and nesting a
        # structure nests its _objects dict.
        class X(Structure):
            _fields_ = [("a", c_char_p), ("b", c_char_p)]
        class Y(Structure):
            _fields_ = [("x", X), ("y", X)]
        s1 = "Hello, World"
        s2 = "Hallo, Welt"
        x = X()
        x.a = s1
        x.b = s2
        self.assertEqual(x._objects, {"0": s1, "1": s2})
        y = Y()
        y.x = x
        self.assertEqual(y._objects, {"0": {"0": s1, "1": s2}})
        ##        x = y.x
        ##        del y
        ##        print x._b_base_._objects

    def test_ptr_struct(self):
        # An array has no tracked objects of its own; assigning it to a
        # POINTER field is expected to record it (final checks are left as
        # commented-out prints upstream).
        class X(Structure):
            _fields_ = [("data", POINTER(c_int))]
        A = c_int*4
        a = A(11, 22, 33, 44)
        self.assertEqual(a._objects, None)
        x = X()
        x.data = a
        ##XXX print x._objects
        ##XXX print x.data[0]
        ##XXX print x.data._objects

if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
PolicyStat/django | tests/model_fields/models.py | 21 | 9803 | import os
import tempfile
import uuid
import warnings
try:
from PIL import Image
except ImportError:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
from django.utils import six
# Regression-test models exercising various field options.

class Foo(models.Model):
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)

def get_foo():
    # Used as the callable default for Bar.a below.
    return Foo.objects.get(id=1)

class Bar(models.Model):
    b = models.CharField(max_length=10)
    a = models.ForeignKey(Foo, default=get_foo)

class Whiz(models.Model):
    # Grouped ("optgroup"-style) choices plus a flat (0, 'Other') entry.
    CHOICES = (
        ('Group 1', (
                (1, 'First'),
                (2, 'Second'),
            )
        ),
        ('Group 2', (
                (3, 'Third'),
                (4, 'Fourth'),
            )
        ),
        (0, 'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)

class Counter(six.Iterator):
    # Iterator used as a choices source; yields the five pairs
    # (2, 'val-2') through (6, 'val-6') — n is incremented before it is
    # returned, so the sequence starts at 2.
    def __init__(self):
        self.n = 1

    def __iter__(self):
        return self

    def __next__(self):
        if self.n > 5:
            raise StopIteration
        else:
            self.n += 1
            return (self.n, 'val-' + str(self.n))

class WhizIter(models.Model):
    # choices provided by a (single-use) iterator rather than a sequence.
    c = models.IntegerField(choices=Counter(), null=True)

class WhizIterEmpty(models.Model):
    # choices provided by an empty generator expression.
    c = models.CharField(choices=(x for x in []), blank=True, max_length=1)
# One small model per field type/option under test.

class BigD(models.Model):
    d = models.DecimalField(max_digits=38, decimal_places=30)

class FloatModel(models.Model):
    size = models.FloatField()

class BigS(models.Model):
    s = models.SlugField(max_length=255)

class SmallIntegerModel(models.Model):
    value = models.SmallIntegerField()

class IntegerModel(models.Model):
    value = models.IntegerField()

class BigIntegerModel(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null=True, blank=True)

class PositiveSmallIntegerModel(models.Model):
    value = models.PositiveSmallIntegerField()

class PositiveIntegerModel(models.Model):
    value = models.PositiveIntegerField()

class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()

class NullBooleanModel(models.Model):
    nbfield = models.NullBooleanField()

class BooleanModel(models.Model):
    # BooleanField with a (non-boolean) default of None.
    bfield = models.BooleanField(default=None)
    string = models.CharField(max_length=10, default='abc')

class DateTimeModel(models.Model):
    d = models.DateField()
    dt = models.DateTimeField()
    t = models.TimeField()

class PrimaryKeyCharModel(models.Model):
    string = models.CharField(max_length=10, primary_key=True)

class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanField's, #15040"""
    bf = models.ForeignKey(BooleanModel)
    nbf = models.ForeignKey(NullBooleanModel)

class FkToChar(models.Model):
    """Model with FK to a model with a CharField primary key, #19299"""
    out = models.ForeignKey(PrimaryKeyCharModel)

class RenamedField(models.Model):
    # Attribute name differs from the column/field name given via name=.
    modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))

class VerboseNameField(models.Model):
    # Every field carries an explicit verbose name as its first argument.
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2", default=False)
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
    field5 = models.DateField("verbose field5")
    field6 = models.DateTimeField("verbose field6")
    field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
    field8 = models.EmailField("verbose field8")
    field9 = models.FileField("verbose field9", upload_to="unused")
    field10 = models.FilePathField("verbose field10")
    field11 = models.FloatField("verbose field11")
    # Don't want to depend on Pillow in this test
    #field_image = models.ImageField("verbose field")
    field12 = models.IntegerField("verbose field12")
    # Swallow the deprecation warning IPAddressField raises at class
    # definition time.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        field13 = models.IPAddressField("verbose field13")
    field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
    field15 = models.NullBooleanField("verbose field15")
    field16 = models.PositiveIntegerField("verbose field16")
    field17 = models.PositiveSmallIntegerField("verbose field17")
    field18 = models.SlugField("verbose field18")
    field19 = models.SmallIntegerField("verbose field19")
    field20 = models.TextField("verbose field20")
    field21 = models.TimeField("verbose field21")
    field22 = models.URLField("verbose field22")
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.

# See ticket #16570.
class DecimalLessThanOne(models.Model):
    # max_digits == decimal_places, i.e. only values strictly below 1.
    d = models.DecimalField(max_digits=3, decimal_places=3)

# See ticket #18389.
class FieldClassAttributeModel(models.Model):
    # A field *class* (not instance) stored as a class attribute.
    field_class = models.CharField

###############################################################################

class DataModel(models.Model):
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()

###############################################################################
# FileField

class Document(models.Model):
    myfile = models.FileField(upload_to='unused')

###############################################################################
# ImageField
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbsctractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field
to make sure the dimension update is correctly run on concrete subclass
instance post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbsctractPersonWithHeight):
"""
Concrete model that subclass an abctract one with only on dimension
field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
* Has a nullalble ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
###############################################################################
class UUIDModel(models.Model):
    # Minimal model exercising a required UUIDField.
    field = models.UUIDField()
class NullableUUIDModel(models.Model):
    # UUIDField that may be left empty/NULL.
    field = models.UUIDField(blank=True, null=True)
class PrimaryKeyUUIDModel(models.Model):
    # UUID primary key with a client-side default generator.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)
| bsd-3-clause |
mihaip/NewsBlur | apps/rss_feeds/management/commands/mark_read.py | 19 | 1348 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from apps.reader.models import UserSubscription
from optparse import make_option
import datetime
class Command(BaseCommand):
    """Mark a user's subscriptions read.

    With ``--days 0`` every feed is marked fully read; otherwise the
    per-subscription mark-read date is pushed back DAYS days and the
    unread counts are flagged for recalculation.  Also bumps the user's
    last_seen_on timestamp.
    """
    option_list = BaseCommand.option_list + (
        make_option("-d", "--days", dest="days", nargs=1, default=1, help="Days of unread"),
        make_option("-u", "--username", dest="username", nargs=1, help="Specify user id or username"),
        make_option("-U", "--userid", dest="userid", nargs=1, help="Specify user id or username"),
    )

    def handle(self, *args, **options):
        if options['userid']:
            user = User.objects.filter(pk=options['userid'])[0]
        elif options['username']:
            user = User.objects.get(username__icontains=options['username'])
        else:
            raise Exception("Need username or user id.")
        user.profile.last_seen_on = datetime.datetime.utcnow()
        user.profile.save()
        # BUG FIX: option values arrive from the command line as strings,
        # so the old `options['days'] == 0` comparison was never true for
        # `-d 0`.  Normalize to int once, up front.
        days = int(options['days'])
        feeds = UserSubscription.objects.filter(user=user)
        for sub in feeds:
            if days == 0:
                sub.mark_feed_read()
            else:
                sub.mark_read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days)
                sub.needs_unread_recalc = True
                sub.save()
yqm/sl4a | python-build/python-libs/gdata/tests/gdata_tests/blogger/service_test.py | 128 | 3578 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to exercise server interactions for blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import getpass
import atom
from gdata import test_data
import gdata.blogger
import gdata.blogger.service
username = ''
password = ''
test_blog_id = ''
class BloggerCrudTests(unittest.TestCase):
  """Live create/update/delete tests against the Blogger service.

  NOTE(review): these tests talk to the real Blogger API using the
  module-level username/password/test_blog_id gathered at startup;
  run only against a throwaway test account.
  """
  def setUp(self):
    self.client = gdata.blogger.service.BloggerService(email=username,
        password=password, source='GoogleInc-PythonBloggerUnitTests-1')
    # TODO: if the test_blog_id is not set, get the list of the user's blogs
    # and prompt for which blog to add the test posts to.
    self.client.ProgrammaticLogin()
  def testPostDraftUpdateAndDelete(self):
    new_entry = gdata.blogger.BlogPostEntry(title=atom.Title(
        text='Unit Test Post'))
    new_entry.content = atom.Content('text', None, 'Hello World')
    # Make this post a draft so it will not appear publicly on the blog.
    new_entry.control = atom.Control(draft=atom.Draft(text='yes'))
    new_entry.AddLabel('test')
    posted = self.client.AddPost(new_entry, blog_id=test_blog_id)
    self.assertEquals(posted.title.text, new_entry.title.text)
    # Should be one category in the posted entry for the 'test' label.
    self.assertEquals(len(posted.category), 1)
    self.assert_(isinstance(posted, gdata.blogger.BlogPostEntry))
    # Change the title and add more labels.
    posted.title.text = 'Updated'
    posted.AddLabel('second')
    updated = self.client.UpdatePost(entry=posted)
    self.assertEquals(updated.title.text, 'Updated')
    self.assertEquals(len(updated.category), 2)
    # Cleanup and delete the draft blog post.
    self.client.DeletePost(entry=posted)
  def testAddComment(self):
    # Create a test post to add comments to.
    new_entry = gdata.blogger.BlogPostEntry(title=atom.Title(
        text='Comments Test Post'))
    new_entry.content = atom.Content('text', None, 'Hello Comments')
    target_post = self.client.AddPost(new_entry, blog_id=test_blog_id)
    blog_id = target_post.GetBlogId()
    post_id = target_post.GetPostId()
    new_comment = gdata.blogger.CommentEntry()
    new_comment.content = atom.Content(text='Test comment')
    posted = self.client.AddComment(new_comment, blog_id=blog_id,
        post_id=post_id)
    self.assertEquals(posted.content.text, new_comment.content.text)
    # Cleanup and delete the comment test blog post.
    self.client.DeletePost(entry=target_post)
class BloggerQueryTests(unittest.TestCase):
  """Placeholder tests for Blogger query construction.

  BUG FIX: the original defined testConstructBlogQuery three times;
  each later definition shadowed the previous one, so only a single
  (empty) test was ever collected.  The placeholders now have distinct
  names so all three show up in test discovery.
  """

  def testConstructBlogQuery(self):
    pass

  def testConstructBlogPostQuery(self):
    pass

  def testConstructCommentQuery(self):
    pass
if __name__ == '__main__':
  # Credentials and target blog are gathered interactively so nothing
  # sensitive is hard-coded; the tests mutate live data.
  print ('NOTE: Please run these tests only with a test account. ' +
         'The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  test_blog_id = raw_input('Please enter the blog id for the test blog: ')
  unittest.main()
| apache-2.0 |
lmascare/utils | python/admin/lank/db_bkup_restore_schema.py | 1 | 1130 | r"""Schema file to host status of MySQL Database backups."""
def table_db_brman_status(*args):
    r"""DDL for Database Backups Status.

    Returns a 5-tuple ``(create, insert, update, delete, select)`` of SQL
    strings; only the CREATE statement is defined for this table.

    BUG FIX: the original CREATE INDEX statements referenced columns
    ``bkup_date`` and ``type`` which do not exist in the table.  The
    indexes now cover the actual columns: ``bkup_start`` (when the backup
    ran) and ``bkup_rest`` (backup vs. restore operation type).
    """
    create_sql = """
    CREATE TABLE IF NOT EXISTS db_brman_status (
    dbname varchar(16),
    bkup_rest varchar(8),
    filename varchar(128),
    filesize bigint,
    filetimestamp timestamp,
    full_incr varchar(16),
    bkup_status varchar(16),
    bkup_start timestamp,
    bkup_end timestamp,
    id bigint auto_increment,
    PRIMARY KEY (id)
    );
    CREATE INDEX brman_status_bkup_start on db_brman_status(bkup_start);
    CREATE INDEX brman_status_bkup_rest on db_brman_status(bkup_rest);
    """
    insert_sql = None
    update_sql = None
    delete_sql = None
    select_sql = None
    return (create_sql, insert_sql, update_sql, delete_sql, select_sql)
# Registry mapping table name -> DDL factory; consumed by the backup
# bootstrap code to create any missing status tables.
backup_tables = {
    "db_brman_status": table_db_brman_status,
}
if __name__ == "__main__":
    # This module only declares schema; refuse to run standalone.
    from lank_cfg import scriptname
    from obj_utils import LogMe
    mylog = LogMe()
    mylog.critical("{} Should be run as a module only".format(scriptname), 0)
henryhallam/piksi_firmware | scripts/serial_link.py | 1 | 13314 | #!/usr/bin/env python
# Copyright (C) 2011-2014 Swift Navigation Inc.
# Contact: Fergus Noble <fergus@swift-nav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import struct
import threading
import time
import sys
import cPickle as pickle
import calendar
import json
from sbp import crc16, SBP, SBP_PREAMBLE
from sbp.piksi import SBP_MSG_PRINT, SBP_MSG_RESET
from sbp.standard import SBP_MSG_HEARTBEAT
DEFAULT_PORT = '/dev/ttyUSB0'
DEFAULT_BAUD = 1000000
class ListenerThread (threading.Thread):
  """Daemon thread that pulls SBP messages off a SerialLink and fans
  them out to the link's registered global and per-type callbacks."""
  def __init__(self, link, print_unhandled=False):
    super(ListenerThread, self).__init__()
    self.link = link
    self.wants_to_stop = False
    self.print_unhandled = print_unhandled
    self.daemon = True
  def stop(self):
    # Request termination; run() notices the flag on its next iteration.
    self.wants_to_stop = True
  def run(self):
    while not self.wants_to_stop:
      try:
        m = self.link.get_message()
        mt = m.msg_type
        # Will throw away last message here even if it is valid.
        if self.wants_to_stop:
          if self.link.ser:
            self.link.ser.close()
          break
        if mt is not None:
          # Global callbacks see every message; per-type callbacks only
          # see their registered message type.
          for cb in self.link.get_global_callbacks():
            cb(m)
          cbs = self.link.get_callback(mt)
          if cbs is None or len(cbs) == 0:
            if self.print_unhandled:
              print "Host Side Unhandled message %02X" % mt
          else:
            for cb in cbs:
              cb(m)
      except (IOError, OSError):
        # Piksi was disconnected
        print "ERROR: Piksi disconnected!"
        return
      except:
        # A single bad message must not kill the listener; log and
        # keep reading.
        import traceback
        print traceback.format_exc()
def list_ports(self=None):
  """Return the detected serial ports, or None when none are present.

  Entries whose second field begins with "ttyS" (non-virtual serial
  ports on Linux) are excluded.  The unused *self* parameter is kept so
  the function can also be bound as a method.
  """
  import serial.tools.list_ports
  detected = serial.tools.list_ports.comports()
  usable = [entry for entry in detected if entry[1][0:4] != "ttyS"]
  if usable:
    return usable
  return None
class SerialLink:
def __init__(self, port=DEFAULT_PORT, baud=DEFAULT_BAUD, use_ftdi=False, print_unhandled=False):
self.print_unhandled = print_unhandled
self.unhandled_bytes = 0
self.callbacks = {}
self.global_callbacks = []
if use_ftdi:
import pylibftdi
self.ser = pylibftdi.Device()
self.ser.baudrate = baud
else:
import serial
try:
self.ser = serial.Serial(port, baud, timeout=1)
except serial.SerialException:
print
print "Serial device '%s' not found" % port
print
print "The following serial devices were detected:"
print
for p in list_ports():
p_name, p_desc, _ = p
if p_desc == p_name:
print "\t%s" % p_name
else:
print "\t%s (%s)" % (p_name, p_desc)
sys.exit(1)
# Delay then flush the buffer to make sure the receive buffer starts empty.
time.sleep(0.5)
self.ser.flush()
self.lt = ListenerThread(self, print_unhandled)
self.lt.start()
def __del__(self):
self.close()
def close(self):
try:
self.lt.stop()
while self.lt.isAlive():
time.sleep(0.1)
except AttributeError:
pass
def get_message(self):
while True:
if self.lt.wants_to_stop:
return SBP(None, None, None, None, None)
# Sync with magic start bytes
magic = self.ser.read(1)
if magic:
if ord(magic) == SBP_PREAMBLE:
break
else:
self.unhandled_bytes += 1
if self.print_unhandled:
print "Host Side Unhandled byte : 0x%02x," % ord(magic), \
"total", \
self.unhandled_bytes
hdr = ""
while len(hdr) < 5:
hdr = self.ser.read(5 - len(hdr))
msg_type, sender_id, msg_len = struct.unpack('<HHB', hdr)
crc = crc16(hdr, 0)
data = ""
while len(data) < msg_len:
data += self.ser.read(msg_len - len(data))
crc = crc16(data, crc)
crc_received = ""
while len(crc_received) < 2:
crc_received = self.ser.read(2 - len(crc_received))
crc_received = struct.unpack('<H', crc_received)[0]
if crc != crc_received:
print "Host Side CRC mismatch: 0x%04X 0x%04X" % (crc, crc_received)
return SBP(None, None, None, None, None)
return SBP(msg_type, sender_id, msg_len, data, crc)
def send_message(self, msg_type, msg, sender_id=0x42):
framed_msg = struct.pack('<BHHB', SBP_PREAMBLE, msg_type, sender_id, len(msg))
framed_msg += msg
crc = crc16(framed_msg[1:], 0)
framed_msg += struct.pack('<H', crc)
self.ser.write(framed_msg)
def send_char(self, char):
self.ser.write(char)
def add_callback(self, msg_type, callback):
"""
Add a named callback for a specific SBP message type.
"""
try:
self.callbacks[msg_type].append(callback)
except KeyError:
self.callbacks[msg_type] = [callback]
def add_global_callback(self, callback):
"""
Add a global callback for all SBP messages.
"""
self.global_callbacks.append(callback)
def rm_callback(self, msg_type, callback):
try:
self.callbacks[msg_type].remove(callback)
except KeyError:
print "Can't remove callback for msg 0x%04x: message not registered" \
% msg_type
except ValueError:
print "Can't remove callback for msg 0x%04x: callback not registered" \
% msg_type
def get_callback(self, msg_type):
"""
Retrieve a named callback for a specific SBP message type.
"""
if msg_type in self.callbacks:
return self.callbacks[msg_type]
else:
return None
def get_global_callbacks(self):
"""
Retrieve a named callback for a specific SBP message type, or a global
callback for all SBP messages.
"""
return self.global_callbacks
def wait_message(self, msg_type, timeout=None):
ev = threading.Event()
d = {'data': None}
def cb(sbp_msg):
d['data'] = sbp_msg.payload
ev.set()
self.add_callback(msg_type, cb)
ev.wait(timeout)
self.rm_callback(msg_type, cb)
return d['data']
def default_print_callback(sbp_msg):
  """Write an SBP message's raw payload to stdout (no added newline)."""
  payload = sbp_msg.payload
  sys.stdout.write(payload)
def default_log_callback(handle):
  """
  Callback for binary serializing Python objects to a file with a
  consistent logging format:
    (delta, timestamp, item) : tuple
      delta = msec since the reference timestamp (int)
      timestamp = current timestamp (int - UTC epoch)
      item = Python object to serialize

  Parameters
  ----------
  handle : file
    An already-opened file handle

  Returns
  ----------
  pickler : callable taking one argument
    Function that pickles the wrapped log entry to the open handle.
  """
  ref_time = time.time()
  protocol = 2
  def pickler(data):
    entry = format_log_entry(ref_time, data)
    pickle.dump(entry, handle, protocol)
  return pickler
def default_log_json_callback(handle):
  """
  Callback for JSON serializing Python objects to a file, one entry per
  line, with a consistent logging format:
    {'delta': delta, 'timestamp': timestamp, 'data': data} : dict
      delta = msec since the reference timestamp (int)
      timestamp = current timestamp (int - UTC epoch)
      data = Python object to JSON serialize

  Parameters
  ----------
  handle : file
    An already-opened file handle

  Returns
  ----------
  writer : callable taking one argument
    Function that JSON-serializes the wrapped log entry to the handle.
  """
  ref_time = time.time()
  def writer(data):
    entry = format_log_json_entry(ref_time, data)
    handle.write(json.dumps(entry) + '\n')
  return writer
def generate_log_filename():
  """
  Return a timestamped log filename, for example:
    serial_link_log_20141125-140552.log

  Returns
  ----------
  filename : str
  """
  stamp = time.strftime("%Y%m%d-%H%M%S")
  return "serial_link_log_%s.log" % stamp
def format_log_entry(t0, item):
  """
  Wrap *item* in a tuple carrying logging metadata.

  Parameters
  ----------
  t0 : float
    Reference time (seconds)
  item : object
    Python object to serialize

  Returns
  ----------
  (delta, timestamp, item) : tuple
    delta = msec since *t0* (int)
    timestamp = current timestamp (int - UTC epoch)
    item = the object, unchanged
  """
  now = time.time()
  timestamp = calendar.timegm(time.gmtime())
  delta = int((now - t0) * 1000)
  return (delta, timestamp, item)
def format_log_json_entry(t0, item):
  """
  Wrap *item*'s JSON dict in a dict carrying logging metadata.

  Parameters
  ----------
  t0 : float
    Reference time (seconds)
  item : object
    Object exposing to_json_dict(); its dict form is embedded as 'data'

  Returns
  ----------
  {'delta': delta, 'timestamp': timestamp, 'data': data} : dict
    delta = msec since *t0* (int)
    timestamp = current timestamp (int - UTC epoch)
    data = item.to_json_dict()
  """
  now = time.time()
  timestamp = calendar.timegm(time.gmtime())
  delta = int((now - t0) * 1000)
  return {'delta': delta,
          'timestamp': timestamp,
          'data': item.to_json_dict()}
class Watchdog:
  """
  Watchdog wraps a one-shot timer with a callback; each call() rearms
  the timer, so *alarm* only fires if call() goes quiet for *timeout*
  seconds.

  Parameters
  ----------
  timeout : float
    timeout of timer in seconds
  alarm : callback
    function to call when/if timer expires
  """
  def __init__(self, timeout, alarm):
    self.timeout = timeout
    self.alarm = alarm
    self.timer = None
  def __call__(self, *args):
    # Usable directly as a message callback; extra args are ignored.
    self.call()
  def call(self):
    """
    Rearm the timer.
    """
    pending = self.timer
    if pending:
      pending.cancel()
    fresh = threading.Timer(self.timeout, self.alarm)
    fresh.daemon = True
    self.timer = fresh
    fresh.start()
def default_watchdog_alarm():
  """
  Called when the watchdog timer alarms. Will raise a KeyboardInterrupt to the
  main thread and exit the process.
  """
  sys.stderr.write("ERROR: Watchdog expired!")
  # Runs on the timer thread; interrupt_main() (Python 2 'thread' module)
  # delivers KeyboardInterrupt to the main thread.
  import thread
  thread.interrupt_main()
if __name__ == "__main__":
  import argparse
  parser = argparse.ArgumentParser(description='Swift Nav Serial Link.')
  parser.add_argument('-p', '--port',
                      default=[DEFAULT_PORT],
                      nargs=1,
                      help='specify the serial port to use.')
  parser.add_argument("-b", "--baud",
                      default=[DEFAULT_BAUD], nargs=1,
                      help="specify the baud rate to use.")
  parser.add_argument("-v", "--verbose",
                      help="print extra debugging information.",
                      action="store_true")
  parser.add_argument("-f", "--ftdi",
                      help="use pylibftdi instead of pyserial.",
                      action="store_true")
  parser.add_argument("-l", "--log",
                      action="store_true",
                      help="serialize SBP messages to autogenerated log file.")
  parser.add_argument("-j", "--json",
                      action="store_true",
                      help="JSON serialize SBP messages.")
  parser.add_argument("-w", "--watchdog",
                      default=[None], nargs=1,
                      help="alarm after WATCHDOG seconds have elapsed without heartbeat.")
  parser.add_argument("-t", "--timeout",
                      default=[None], nargs=1,
                      help="exit after TIMEOUT seconds have elapsed.")
  parser.add_argument("-r", "--reset",
                      action="store_true",
                      help="reset device after connection.")
  args = parser.parse_args()
  serial_port = args.port[0]
  baud = args.baud[0]
  lock = threading.Lock()
  link = SerialLink(serial_port, baud, use_ftdi=args.ftdi,
                    print_unhandled=args.verbose)
  # Echo device print messages to our stdout.
  link.add_callback(SBP_MSG_PRINT, default_print_callback)
  # Setup logging
  log_file = None
  if args.log:
    log_name = generate_log_filename()
    log_file = open(log_name, 'w+')
    print "Logging at %s." % log_name
    if args.json:
      link.add_global_callback(default_log_json_callback(log_file))
    else:
      link.add_global_callback(default_log_callback(log_file))
  if args.reset:
    link.send_message(SBP_MSG_RESET, '')
  # Setup watchdog: alarms if heartbeats stop arriving for the given period.
  watchdog = args.watchdog[0]
  if watchdog:
    link.add_callback(SBP_MSG_HEARTBEAT, Watchdog(float(watchdog), default_watchdog_alarm))
  try:
    if args.timeout[0] is None:
      # Wait forever until the user presses Ctrl-C
      while True:
        time.sleep(0.1)
    else:
      # Wait until the timeout has elapsed
      expire = time.time() + float(args.timeout[0])
      while time.time() < expire:
        time.sleep(0.1)
      sys.stdout.write("Timer expired!\n")
  except KeyboardInterrupt:
    # Callbacks, such as the watchdog timer on SBP_HEARTBEAT call
    # thread.interrupt_main(), which throw a KeyboardInterrupt
    # exception. To get the proper error condition, return exit code
    # of 1. Note that the finally block does get caught since exit
    # itself throws a SystemExit exception.
    sys.exit(1)
  finally:
    # Flush and close the log before tearing the link down.
    if log_file:
      lock.acquire()
      log_file.flush()
      log_file.close()
      lock.release()
    link.close()
| gpl-3.0 |
yamada-h/ryu | ryu/services/protocols/bgp/operator/commands/show/vrf.py | 5 | 5246 | import logging
import pprint
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.operator.commands.responses import \
WrongParamResp
from ryu.services.protocols.bgp.operator.views.conf import ConfDetailView
from ryu.services.protocols.bgp.operator.views.conf import ConfDictView
from route_formatter_mixin import RouteFormatterMixin
LOG = logging.getLogger('bgpspeaker.operator.commands.show.vrf')
class Routes(Command, RouteFormatterMixin):
    """CLI node 'vrf routes': show routes for one VRF table."""
    help_msg = 'show routes present for vrf'
    param_help_msg = '<vpn-name> <route-family>(ipv4, ipv6)'
    command = 'routes'
    def __init__(self, *args, **kwargs):
        super(Routes, self).__init__(*args, **kwargs)
        self.subcommands = {
            'all': self.All,
        }
    def action(self, params):
        # Expects exactly two positional params: vrf name and route family.
        if len(params) != 2:
            return WrongParamResp()
        vrf_name = params[0]
        vrf_rf = params[1]
        if vrf_rf not in ('ipv4', 'ipv6'):
            return WrongParamResp('route-family not one of (ipv4, ipv6)')
        # Imported here to avoid a circular import at module load time.
        from ryu.services.protocols.bgp.operator.internal_api import \
            WrongParamError
        try:
            return CommandsResponse(
                STATUS_OK,
                self.api.get_single_vrf_routes(vrf_name, vrf_rf)
            )
        except WrongParamError as e:
            return CommandsResponse(
                STATUS_ERROR,
                'wrong parameters: %s' % str(e)
            )
    @classmethod
    def cli_resp_formatter(cls, resp):
        # Errors fall back to the generic formatter; successes render as
        # a route table.
        if resp.status == STATUS_ERROR:
            return super(Routes, cls).cli_resp_formatter(resp)
        return cls._format_family_header() + cls._format_family(resp.value)
    class All(Command, RouteFormatterMixin):
        """Subcommand 'vrf routes all': routes for every VRF."""
        help_msg = 'show routes for all VRFs'
        command = 'all'
        def action(self, params):
            if len(params) != 0:
                return WrongParamResp()
            return CommandsResponse(
                STATUS_OK,
                self.api.get_all_vrf_routes()
            )
        @classmethod
        def cli_resp_formatter(cls, resp):
            if resp.status == STATUS_ERROR:
                return Command.cli_resp_formatter(resp)
            ret = cls._format_family_header()
            # Python 2 dict iteration (iteritems); one section per VPN.
            for family, data in resp.value.iteritems():
                ret += 'VPN: {0}\n'.format(family)
                ret += cls._format_family(data)
            return ret
class CountRoutesMixin(object):
    """Mixin adding a helper that counts routes in a single VRF table.

    Expects the host class to provide ``self.api`` with a
    ``get_single_vrf_routes(name, route_family)`` method.
    """
    def _count_routes(self, vrf_name, vrf_rf):
        routes = self.api.get_single_vrf_routes(vrf_name, vrf_rf)
        return len(routes)
class Summary(Command, CountRoutesMixin):
    """CLI node 'vrf summary': configuration + route count per VRF."""
    help_msg = 'show configuration and summary of vrf'
    param_help_msg = '<rd> <route_family>| all'
    command = 'summary'
    def __init__(self, *args, **kwargs):
        super(Summary, self).__init__(*args, **kwargs)
        self.subcommands = {
            'all': self.All
        }
    def action(self, params):
        if len(params) == 0:
            return WrongParamResp('Not enough params')
        vrf_confs = self.api.get_vrfs_conf()
        # Route family defaults to ipv4 when omitted.
        if len(params) < 2:
            vrf_rf = 'ipv4'
        else:
            vrf_rf = params[1]
        vrf_key = params[0], vrf_rf
        if vrf_key in vrf_confs:
            view = ConfDetailView(vrf_confs[vrf_key])
            encoded = view.encode()
            encoded['routes_count'] = self._count_routes(params[0], vrf_rf)
        else:
            return WrongParamResp('No vrf matched by %s' % str(vrf_key))
        return CommandsResponse(
            STATUS_OK,
            encoded
        )
    @classmethod
    def cli_resp_formatter(cls, resp):
        if resp.status == STATUS_ERROR:
            return Command.cli_resp_formatter(resp)
        return pprint.pformat(resp.value)
    class All(Command, CountRoutesMixin):
        """Subcommand 'vrf summary all': summary for every VRF."""
        command = 'all'
        help_msg = 'shows all vrfs configurations and summary'
        def action(self, params):
            vrf_confs = self.api.get_vrfs_conf()
            view = ConfDictView(vrf_confs)
            encoded = view.encode()
            # Python 2 dict iteration (iteritems).
            for vrf_key, conf in encoded.iteritems():
                vrf_name, vrf_rf = vrf_key
                conf['routes_count'] = self._count_routes(
                    vrf_name,
                    vrf_rf
                )
            # Keys are (name, rf) tuples; stringify for serialization.
            encoded = {str(k): v for k, v in encoded.iteritems()}
            return CommandsResponse(
                STATUS_OK,
                encoded
            )
        # NOTE(review): redundant override -- identical to
        # CountRoutesMixin._count_routes inherited above.
        def _count_routes(self, vrf_name, vrf_rf):
            return len(self.api.get_single_vrf_routes(vrf_name, vrf_rf))
class Vrf(Routes):
    """Main node for vrf related commands. Acts also as Routes node (that's why
    it inherits from it) for legacy reasons.
    """
    help_msg = 'vrf related commands subtree'
    command = 'vrf'
    def __init__(self, *args, **kwargs):
        super(Vrf, self).__init__(*args, **kwargs)
        # Extend the subcommand table built by Routes.__init__.
        self.subcommands['routes'] = Routes
        self.subcommands['summary'] = Summary
| apache-2.0 |
sursum/buckanjaren | buckanjaren/lib/python3.5/site-packages/django/contrib/auth/urls.py | 105 | 1140 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import url
from django.contrib.auth import views
# Standard django.contrib.auth view routes: login/logout, password
# change, and the multi-step password reset flow.
urlpatterns = [
    url(r'^login/$', views.LoginView.as_view(), name='login'),
    url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
    url(r'^password_change/$', views.PasswordChangeView.as_view(), name='password_change'),
    url(r'^password_change/done/$', views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    url(r'^password_reset/$', views.PasswordResetView.as_view(), name='password_reset'),
    url(r'^password_reset/done/$', views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    url(r'^reset/done/$', views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
| mit |
safchain/contrail-controller | src/config/test/config_systest.py | 16 | 1779 | #!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_systest.py
#
# System tests for analytics
#
import sys
builddir = sys.path[0] + '/../..'
from gevent import monkey
monkey.patch_all()
import os
import unittest
import testtools
import fixtures
import socket
from utils.config_fixture import ConfigFixture
from mockcassandra import mockcassandra
import logging
import time
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class ConfigTest(testtools.TestCase, fixtures.TestWithFixtures):
    """System test: boots a mock cassandra plus the config stack and
    verifies the default project is created."""
    @classmethod
    def setUpClass(cls):
        # One mock cassandra instance shared by all tests in the class.
        cls.cassandra_port = ConfigTest.get_free_port()
        mockcassandra.start_cassandra(cls.cassandra_port)
    @classmethod
    def tearDownClass(cls):
        mockcassandra.stop_cassandra(cls.cassandra_port)
    #@unittest.skip('Skipping non-cassandra test with vizd')
    def test_00_startapi(self):
        '''
        This test starts redis,vizd,opserver and qed
        Then it checks that the collector UVE (via redis)
        can be accessed from opserver.
        '''
        logging.info("*** test_00_nocassandra ***")
        config_obj = self.useFixture(
            ConfigFixture(logging,
                builddir, self.cassandra_port))
        assert(config_obj.verify_default_project())
        return True
    # end test_00_nocassandra
    @staticmethod
    def get_free_port():
        # Bind to port 0 to let the OS pick a free port, then release it.
        cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        cs.bind(("", 0))
        cport = cs.getsockname()[1]
        cs.close()
        return cport
if __name__ == '__main__':
    # Allow running this system test module directly.
    unittest.main()
| apache-2.0 |
kalahbrown/HueBigSQL | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Prober for the Shift_JIS Japanese encoding: combines a coding
    state machine, a character-distribution analyser, and a contextual
    analyser, and reports the highest confidence of the latter two."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # Delegated: the context analyser distinguishes SJIS variants.
        return self._mContextAnalysis.get_charset_name() if False else self._mContextAnalyzer.get_charset_name()
    def feed(self, aBuf):
        aLen = len(aBuf)
        for i in range(0, aLen):
            # Drive the state machine one byte at a time.
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognized; feed it to both
                # analysers (handling the buffer-boundary case at i == 0).
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte for the next feed() call's boundary case.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| apache-2.0 |
mccheung/kbengine | kbe/res/scripts/common/Lib/lib2to3/fixes/fix_metaclass.py | 88 | 8201 | """Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classef (inherits nothing, inherits once, inherints
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possiblities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for node in parent.children:
        if node.type == syms.suite:
            # Indented class body: recurse into the suite.
            return has_metaclass(node)
        elif node.type == syms.simple_stmt and node.children:
            expr_node = node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Only a plain `__metaclass__ = X` assignment counts.
                left_side = expr_node.children[0]
                if isinstance(left_side, Leaf) and \
                        left_side.value == '__metaclass__':
                    return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! oneliners have no suite node, we have to fake one up
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything into a suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    # NOTE(review): this trailing rebind appears vestigial; `node` is
    # not used after this point.
    node = suite
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt.  We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        # No semicolon: nothing to split.
        return

    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # Transfer the original leading whitespace/comments to the new stmt.
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Strip a trailing NEWLINE leaf from *node*, if one is present."""
    children = node.children
    if children and children[-1].type == token.NEWLINE:
        children[-1].remove()
def find_metas(cls_node):
    """Yield (suite, index, stmt) for every `__metaclass__ = X`
    assignment found directly in the class body suite."""
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found a assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                # Clear stale leading whitespace left behind by removal.
                node.prefix = ''
            return
        else:
            # Depth-first: push children so leaves are visited in order.
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite ``__metaclass__ = Meta`` class-body assignments into the
    Python 3 keyword form ``class C(..., metaclass=Meta)``.

    The matched statement is removed from the class suite and its
    right-hand side is moved into the classdef's argument list.
    """
    BM_compatible = True
    PATTERN = """
    classdef<any*>
    """
    def transform(self, node, results):
        if not has_metaclass(node):
            return
        fixup_parse_tree(node)
        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()
        text_type = node.children[0].type # always Leaf(nnn, 'class')
        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            # 0 1 2 3 4 5 6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # Single parent: wrap it in a fresh arglist so we can append.
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
            # 0 1 2 3 4 5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            # 0 1 2 3
            # No parens at all: synthesize '(' arglist ')' after the name.
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")
        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix
        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''
        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''
        arglist.append_child(last_metaclass)
        fixup_indent(suite)
        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass__
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
| lgpl-3.0 |
0359xiaodong/viewfinder | backend/www/test/save_photos_test.py | 13 | 19773 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Test saving photos to a default viewpoint.
"""
__author__ = 'andy@emailscrubbed.com (Andy Kimball)'
import mock
import time
from copy import deepcopy
from functools import partial
from operator import itemgetter
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.user import User
from viewfinder.backend.www.test import service_base_test
class SavePhotosTestCase(service_base_test.ServiceBaseTestCase):
  """Exercises the save_photos service API, which copies episodes and photos
  into the calling user's default viewpoint.

  Fix: two test methods were defined twice with the same name
  (testSaveDuplicatePhotos and testSaveToSameEpisode), so the earlier
  definitions were silently shadowed and never run; the later definitions
  are renamed to describe their distinct scenarios.
  """
  def setUp(self):
    super(SavePhotosTestCase, self).setUp()
    self._CreateSimpleTestAssets()
    # Share the test assets into a new viewpoint that tests can save from.
    self._existing_vp_id, existing_ep_ids = self._tester.ShareNew(self._cookie,
                                                                  [(self._episode_id, self._photo_ids)],
                                                                  [self._user2.user_id],
                                                                  **self._CreateViewpointDict(self._cookie))
    self._existing_ep_id = existing_ep_ids[0]
  def testSave(self):
    """Save a single photo to the default viewpoint."""
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[:1])])
  def testSaveMultiple(self):
    """Save two photos to the default viewpoint."""
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids)])
  def testSaveToSelf(self):
    """Save photos from default viewpoint to default viewpoint."""
    self._tester.SavePhotos(self._cookie, [(self._episode_id, self._photo_ids)])
  def testSaveNoEpisodes(self):
    """Save empty episode list."""
    self._tester.SavePhotos(self._cookie, [])
  def testSaveMultipleEpisodes(self):
    """Save photos from multiple episodes."""
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id2, self._photo_ids2)],
                                          [self._user3.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    self._tester.SavePhotos(self._cookie,
                            [(self._existing_ep_id, self._photo_ids),
                             (ep_ids[0], self._photo_ids2)])
  def testSaveDuplicatePhotos(self):
    """Save same photos from same source episode to same target episode."""
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids)])
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[:1])])
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[1:])])
  def testSaveSameEpisode(self):
    """Save different photos from same source episode to same target episode."""
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[:1])])
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[1:])])
  def testSaveDifferentUser(self):
    """Save episode created by a different user."""
    self._tester.SavePhotos(self._cookie2, [(self._existing_ep_id, self._photo_ids)])
  def testSaveDuplicatePhotosToSameTarget(self):
    """Save same photos from same source episode to same explicitly-named
    target episode in default viewpoint.

    (Renamed: this previously shadowed testSaveDuplicatePhotos above.)
    """
    new_episode_id = Episode.ConstructEpisodeId(time.time(), self._device_ids[0], self._test_id)
    self._test_id += 1
    share_list = [{'existing_episode_id': self._existing_ep_id,
                   'new_episode_id': new_episode_id,
                   'photo_ids': self._photo_ids}]
    self._tester.SavePhotos(self._cookie, share_list)
    self._tester.SavePhotos(self._cookie, share_list)
  def testSaveToSameEpisode(self):
    """Save multiple photos to same target episode in default viewpoint."""
    timestamp = time.time()
    new_episode_id = Episode.ConstructEpisodeId(timestamp, self._device_ids[0], self._test_id)
    self._test_id += 1
    share_dict1 = {'existing_episode_id': self._existing_ep_id,
                   'new_episode_id': new_episode_id,
                   'photo_ids': self._photo_ids[:1]}
    share_dict2 = {'existing_episode_id': self._existing_ep_id,
                   'new_episode_id': new_episode_id,
                   'photo_ids': self._photo_ids[1:]}
    self._tester.SavePhotos(self._cookie, [share_dict1, share_dict2])
  def testSaveAfterRemove(self):
    """Save photos after having removed them."""
    # Save photos into user #2's default viewpoint.
    ep_ids = self._tester.SavePhotos(self._cookie2, [(self._existing_ep_id, self._photo_ids)])
    # Remove photo from the viewpoint.
    self._tester.RemovePhotos(self._cookie2, [(ep_ids[0], self._photo_ids[:1])])
    post = self._RunAsync(Post.Query, self._client, ep_ids[0], self._photo_ids[0], None)
    self.assertIn(Post.REMOVED, post.labels)
    # Save again, expecting the REMOVED label to be deleted.
    self._tester.SavePhotos(self._cookie2,
                            [{'existing_episode_id': self._existing_ep_id,
                              'new_episode_id': ep_ids[0],
                              'photo_ids': self._photo_ids}])
    post = self._RunAsync(Post.Query, self._client, ep_ids[0], self._photo_ids[0], None)
    self.assertNotIn(Post.REMOVED, post.labels)
  def testSaveAfterUnshare(self):
    """Save photos after having unshared them."""
    # Save photos into user #2's default viewpoint.
    ep_ids = self._tester.SavePhotos(self._cookie2, [(self._existing_ep_id, self._photo_ids)])
    # Unshare photo from the viewpoint.
    self._tester.Unshare(self._cookie2, self._user2.private_vp_id, [(ep_ids[0], self._photo_ids[:1])])
    post = self._RunAsync(Post.Query, self._client, ep_ids[0], self._photo_ids[0], None)
    self.assertIn(Post.UNSHARED, post.labels)
    # Save again, expecting the UNSHARED label to be deleted.
    self._tester.SavePhotos(self._cookie2,
                            [{'existing_episode_id': self._existing_ep_id,
                              'new_episode_id': ep_ids[0],
                              'photo_ids': self._photo_ids}])
    post = self._RunAsync(Post.Query, self._client, ep_ids[0], self._photo_ids[0], None)
    self.assertNotIn(Post.UNSHARED, post.labels)
  def testSaveOneViewpoint(self):
    """Save all episodes from single viewpoint."""
    # ------------------------------
    # Save viewpoint that doesn't exist (no-op).
    # ------------------------------
    self._tester.SavePhotos(self._cookie2, viewpoint_ids=['vunk'])
    self.assertEqual(self._CountEpisodes(self._cookie2, self._user2.private_vp_id), 0)
    # ------------------------------
    # Save single episode from single viewpoint.
    # ------------------------------
    self._tester.SavePhotos(self._cookie2, viewpoint_ids=[self._existing_vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie2, self._user2.private_vp_id), 1)
    # ------------------------------
    # Save multiple episodes from single viewpoint.
    # ------------------------------
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id, self._photo_ids),
                                           (self._episode_id2, self._photo_ids2)],
                                          [self._user2.user_id, self._user3.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    self._tester.SavePhotos(self._cookie2, viewpoint_ids=[vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie2, self._user2.private_vp_id), 3)
    # ------------------------------
    # Save multiple episodes, but specify one episode explicitly.
    # ------------------------------
    ep_save_list = [(ep_ids[0], self._photo_ids[:1])]
    self._tester.SavePhotos(self._cookie3, ep_save_list=ep_save_list, viewpoint_ids=[vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie3, self._user3.private_vp_id), 2)
    # ------------------------------
    # Save multiple episodes, and specify all episodes explicitly.
    # ------------------------------
    ep_save_list = [(ep_ids[0], []), (ep_ids[1], self._photo_ids2)]
    self._tester.SavePhotos(self._cookie, ep_save_list=ep_save_list, viewpoint_ids=[vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 4)
  def testSaveMultipleViewpoints(self):
    """Save all episodes from multiple viewpoints."""
    # ------------------------------
    # Save multiple episodes from multiple viewpoints.
    # ------------------------------
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id, self._photo_ids),
                                           (self._episode_id2, self._photo_ids2)],
                                          [self._user2.user_id, self._user3.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    self._tester.SavePhotos(self._cookie2, viewpoint_ids=[self._existing_vp_id, vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie2, self._user2.private_vp_id), 3)
    # ------------------------------
    # Save multiple episodes from multiple viewpoints, but specify episode explicitly.
    # ------------------------------
    ep_save_list = [(ep_ids[0], self._photo_ids), (self._existing_ep_id, self._photo_ids)]
    self._tester.SavePhotos(self._cookie, ep_save_list=ep_save_list, viewpoint_ids=[self._existing_vp_id, vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 5)
    # ------------------------------
    # Save episode explicitly from one viewpoint, then save another entire viewpoint.
    # ------------------------------
    vp_id2, ep_ids2 = self._tester.ShareNew(self._cookie,
                                            [(self._episode_id2, self._photo_ids2)],
                                            [self._user2.user_id, self._user3.user_id],
                                            **self._CreateViewpointDict(self._cookie))
    ep_save_list = [(self._existing_ep_id, self._photo_ids)]
    self._tester.SavePhotos(self._cookie2, ep_save_list=ep_save_list, viewpoint_ids=[vp_id2])
    self.assertEqual(self._CountEpisodes(self._cookie2, self._user2.private_vp_id), 5)
    # ------------------------------
    # Save from empty list of viewpoints.
    # ------------------------------
    self._tester.SavePhotos(self._cookie, viewpoint_ids=[])
  def testSaveDuplicateIds(self):
    """Save duplicate episode and photo ids."""
    # ------------------------------
    # Duplicate photo ids.
    # ------------------------------
    self._tester.SavePhotos(self._cookie,
                            [(self._existing_ep_id, self._photo_ids + self._photo_ids)])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 3)
    # ------------------------------
    # Duplicate episode ids.
    # ------------------------------
    self._tester.SavePhotos(self._cookie,
                            [(self._existing_ep_id, self._photo_ids),
                             (self._existing_ep_id, self._photo_ids)])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 5)
    # ------------------------------
    # Save same episode to same target episode multiple times.
    # ------------------------------
    new_episode_id = Episode.ConstructEpisodeId(time.time(), self._device_ids[0], self._test_id)
    self._test_id += 1
    self._tester.SavePhotos(self._cookie,
                            [{'existing_episode_id': self._existing_ep_id,
                              'new_episode_id': new_episode_id,
                              'photo_ids': self._photo_ids[:1]},
                             {'existing_episode_id': self._existing_ep_id,
                              'new_episode_id': new_episode_id,
                              'photo_ids': self._photo_ids}])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 6)
    # ------------------------------
    # Duplicate viewpoint ids.
    # ------------------------------
    vp_id, ep_ids = self._tester.ShareNew(self._cookie,
                                          [(self._episode_id2, self._photo_ids2)],
                                          [self._user3.user_id],
                                          **self._CreateViewpointDict(self._cookie))
    self._tester.SavePhotos(self._cookie,
                            ep_save_list=[(self._existing_ep_id, self._photo_ids),
                                          (self._existing_ep_id, self._photo_ids)],
                            viewpoint_ids=[vp_id, vp_id])
    self.assertEqual(self._CountEpisodes(self._cookie, self._user.private_vp_id), 9)
  def testSaveViewpointNoPermission(self):
    """ERROR: Try to save viewpoint with no permissions."""
    self.assertRaisesHttpError(403, self._tester.SavePhotos, self._cookie3, viewpoint_ids=[self._existing_vp_id])
  def testSaveFromMultipleParents(self):
    """ERROR: Try to save to the same episode from multiple parent episodes."""
    save_ep_ids = self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids)])
    share_ep_ids = self._tester.ShareExisting(self._cookie,
                                              self._existing_vp_id,
                                              [(self._episode_id2, self._photo_ids2)])
    share_dict = {'existing_episode_id': share_ep_ids[0],
                  'new_episode_id': save_ep_ids[0],
                  'photo_ids': self._photo_ids}
    self.assertRaisesHttpError(400, self._tester.SavePhotos, self._cookie, [share_dict])
  @mock.patch.object(Operation, 'FAILPOINTS_ENABLED', True)
  def testIdempotency(self):
    """Force op failure in order to test idempotency."""
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids[:1])])
    self._tester.SavePhotos(self._cookie, [(self._existing_ep_id, self._photo_ids)])
  def testSaveNoAccess(self):
    """ERROR: Try to share episodes from viewpoint which user does not follow."""
    self.assertRaisesHttpError(403, self._tester.SavePhotos, self._cookie3,
                               [(self._existing_ep_id, self._photo_ids)])
  def testSaveInvalidEpisode(self):
    """ERROR: Try to save a non-existing episode."""
    self.assertRaisesHttpError(400, self._tester.SavePhotos, self._cookie2,
                               [('eunknown', self._photo_ids)])
  def testWrongDeviceId(self):
    """ERROR: Try to create an episode using a device id that is different
    than the one in the user cookie.
    """
    save_list = [self._tester.CreateCopyDict(self._cookie2, self._existing_ep_id, self._photo_ids)]
    self.assertRaisesHttpError(403, self._tester.SavePhotos, self._cookie, save_list)
  def testSaveToSameEpisodeFromTwoSources(self):
    """ERROR: Try to save from two source episodes to the same target episode.

    (Renamed: this previously shadowed testSaveToSameEpisode above.)
    """
    share_ep_ids = self._tester.ShareExisting(self._cookie,
                                              self._existing_vp_id,
                                              [(self._episode_id2, self._photo_ids2)])
    new_episode_id = Episode.ConstructEpisodeId(time.time(), self._device_ids[0], self._test_id)
    self._test_id += 1
    self.assertRaisesHttpError(400,
                               self._tester.SavePhotos,
                               self._cookie,
                               [{'existing_episode_id': self._existing_ep_id,
                                 'new_episode_id': new_episode_id,
                                 'photo_ids': self._photo_ids},
                                {'existing_episode_id': share_ep_ids[0],
                                 'new_episode_id': new_episode_id,
                                 'photo_ids': self._photo_ids2}])
def _TestSavePhotos(tester, user_cookie, request_dict):
  """Called by the ServiceTester in order to test save_photos service API call.

  Sends the request, validates the resulting model state, and compares the
  actual response against the expected (empty) response dict.
  """
  user_id, device_id = tester.GetIdsFromCookie(user_cookie)
  # Deep-copy so validation sees the request unmodified by the send.
  request_dict = deepcopy(request_dict)
  # Send save_photos request.
  actual_dict = tester.SendRequest('save_photos', user_cookie, request_dict)
  _ValidateSavePhotos(tester, user_id, device_id, request_dict)
  tester._CompareResponseDicts('save_photos', user_id, request_dict, {}, actual_dict)
  return actual_dict
def _ValidateSavePhotos(tester, user_id, device_id, request_dict):
  """Validates that episodes and photos listed in the request have been saved to the given
  user's default viewpoint. Validates that the correct activity and notifications have been
  created for a save_photos operation.
  """
  validator = tester.validator
  op_dict = tester._DeriveNotificationOpDict(user_id, device_id, request_dict)
  user = validator.GetModelObject(User, user_id)
  request_ep_dicts = request_dict.get('episodes', [])
  # Need to validate episodes specified in the request.
  # Merge duplicate target episodes, accumulating their photo ids.
  save_ep_dicts = {}
  for ep_dict in request_ep_dicts:
    new_episode_id = ep_dict['new_episode_id']
    if new_episode_id in save_ep_dicts:
      save_ep_dicts[new_episode_id]['photo_ids'].extend(ep_dict['photo_ids'])
    else:
      save_ep_dicts[new_episode_id] = ep_dict
  # Need to validate episodes from viewpoints specified in the request.
  if 'viewpoint_ids' in request_dict:
    for viewpoint_id in set(request_dict['viewpoint_ids']):
      # Lambda is invoked immediately by QueryModelObjects, so the loop
      # variable capture is safe here.
      source_eps = validator.QueryModelObjects(Episode, predicate=lambda e: e.viewpoint_id == viewpoint_id)
      for source_ep in source_eps:
        # Find the id of the target episode.
        query_expr = ('episode.parent_ep_id={ep_id} & episode.viewpoint_id={vp_id}',
                      {'ep_id': source_ep.episode_id, 'vp_id': user.private_vp_id})
        target_ep_key = util.GetSingleListItem(tester._RunAsync(Episode.IndexQueryKeys, validator.client, query_expr))
        posts = validator.QueryModelObjects(Post, source_ep.episode_id)
        save_ep_dicts[target_ep_key.hash_key] = {'existing_episode_id': source_ep.episode_id,
                                                 'new_episode_id': target_ep_key.hash_key,
                                                 'photo_ids': [post.photo_id for post in posts]}
  # Normalize: dedupe and sort photo ids, order episodes by target id.
  save_ep_dicts = sorted([{'existing_episode_id': ep_dict['existing_episode_id'],
                           'new_episode_id': ep_dict['new_episode_id'],
                           'photo_ids': sorted(set(ep_dict['photo_ids']))}
                          for ep_dict in save_ep_dicts.itervalues()],
                         key=itemgetter('new_episode_id'))
  # Validate all episodes and posts are created.
  validator.ValidateCopyEpisodes(op_dict, user.private_vp_id, save_ep_dicts)
  # Validate activity and notifications for the save.
  activity_dict = {'name': 'save_photos',
                   'activity_id': request_dict['activity']['activity_id'],
                   'timestamp': request_dict['activity']['timestamp'],
                   'episodes': [{'episode_id': ep_dict['new_episode_id'],
                                 'photo_ids': ep_dict['photo_ids']}
                                for ep_dict in save_ep_dicts]}
  invalidate = {'episodes': [{'episode_id': ep_dict['new_episode_id'],
                              'get_attributes': True,
                              'get_photos': True}
                             for ep_dict in save_ep_dicts]}
  validator.ValidateFollowerNotifications(user.private_vp_id,
                                          activity_dict,
                                          op_dict,
                                          invalidate)
  validator.ValidateViewpointAccounting(user.private_vp_id)
| apache-2.0 |
mhaessig/servo | tests/wpt/css-tests/tools/manifest/item.py | 62 | 6322 | import os
from six.moves.urllib.parse import urljoin
from abc import ABCMeta, abstractmethod, abstractproperty
def get_source_file(source_files, tests_root, manifest, path):
    """Return a SourceFile for *path*, using *source_files* as a cache.

    When *source_files* is None no caching is performed and a fresh
    SourceFile is always constructed.
    """
    def build():
        from .sourcefile import SourceFile
        return SourceFile(tests_root, path, manifest.url_base)
    if source_files is None:
        return build()
    if path in source_files:
        return source_files[path]
    created = source_files[path] = build()
    return created
class ManifestItem(object):
    """Base class for manifest entries, identified by (item_type, id).

    Fix: ``__hash__`` previously mixed ``meta_key()`` into the hash while
    ``__eq__`` compared only ``key()``, so two equal items could hash
    differently, violating the eq/hash contract and breaking set/dict
    deduplication. The hash now covers exactly what equality covers.
    """
    __metaclass__ = ABCMeta
    # Overridden by subclasses with the manifest item-type string.
    item_type = None
    def __init__(self, source_file, manifest=None):
        self.manifest = manifest
        self.source_file = source_file
    @abstractproperty
    def id(self):
        """The test's id (usually its url)"""
        pass
    @property
    def path(self):
        """The test path relative to the test_root"""
        return self.source_file.rel_path
    @property
    def https(self):
        # True when the source file carries the "https" metadata flag.
        return "https" in self.source_file.meta_flags
    def key(self):
        """A unique identifier for the test"""
        return (self.item_type, self.id)
    def meta_key(self):
        """Extra metadata that doesn't form part of the test identity, but for
        which changes mean regenerating the manifest (e.g. the test timeout)."""
        return ()
    def __eq__(self, other):
        if not hasattr(other, "key"):
            return False
        return self.key() == other.key()
    def __ne__(self, other):
        # Needed under Python 2, where != does not delegate to __eq__.
        return not self == other
    def __hash__(self):
        # Hash only key(): equal objects (per __eq__) must hash equally.
        return hash(self.key())
    def __repr__(self):
        return "<%s.%s id=%s, path=%s>" % (self.__module__, self.__class__.__name__, self.id, self.path)
    def to_json(self):
        return [{}]
    @classmethod
    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, path)
        return cls(source_file,
                   manifest=manifest)
class URLManifestItem(ManifestItem):
    """Manifest item addressed by a URL relative to a configurable base."""
    def __init__(self, source_file, url, url_base="/", manifest=None):
        ManifestItem.__init__(self, source_file, manifest=manifest)
        self._url = url
        self.url_base = url_base
    @property
    def id(self):
        # A URL-based item is identified by its absolute URL.
        return self.url
    @property
    def url(self):
        # Join the stored relative URL onto the base on each access.
        return urljoin(self.url_base, self._url)
    def to_json(self):
        # Serialize the relative URL plus an (empty) extras dict.
        return [self._url, {}]
    @classmethod
    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, path)
        item_url, _extras = obj
        return cls(source_file,
                   item_url,
                   url_base=manifest.url_base,
                   manifest=manifest)
class TestharnessTest(URLManifestItem):
    """A testharness.js test, carrying an optional per-test timeout."""
    item_type = "testharness"
    def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
        URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
        self.timeout = timeout
    def meta_key(self):
        # Timeout changes force manifest regeneration but do not alter
        # the test's identity.
        return (self.timeout,)
    def to_json(self):
        serialized = URLManifestItem.to_json(self)
        extras = serialized[-1]
        if self.timeout is not None:
            extras["timeout"] = self.timeout
        return serialized
    @classmethod
    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, path)
        item_url, extras = obj
        return cls(source_file,
                   item_url,
                   url_base=manifest.url_base,
                   timeout=extras.get("timeout"),
                   manifest=manifest)
class RefTestNode(URLManifestItem):
    """A node in a reftest reference chain: a URL plus its (reference URL,
    "==" / "!=") comparisons and optional rendering metadata.
    """
    item_type = "reftest_node"
    def __init__(self, source_file, url, references, url_base="/", timeout=None,
                 viewport_size=None, dpi=None, manifest=None):
        URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
        # Only match ("==") and mismatch ("!=") comparisons are defined.
        for _, ref_type in references:
            if ref_type not in ["==", "!="]:
                raise ValueError("Unrecognised ref_type %s" % ref_type)
        self.references = tuple(references)
        self.timeout = timeout
        self.viewport_size = viewport_size
        self.dpi = dpi
    def meta_key(self):
        # Non-identity metadata; changes here force manifest regeneration.
        return (self.timeout, self.viewport_size, self.dpi)
    def to_json(self):
        # NOTE(review): this serializes self.url (absolute, joined with
        # url_base) whereas URLManifestItem.to_json serializes self._url
        # (relative). Confirm whether the asymmetry is intentional before
        # changing it — the manifest JSON format is persisted on disk.
        rv = [self.url, self.references, {}]
        extras = rv[-1]
        if self.timeout is not None:
            extras["timeout"] = self.timeout
        if self.viewport_size is not None:
            extras["viewport_size"] = self.viewport_size
        if self.dpi is not None:
            extras["dpi"] = self.dpi
        return rv
    @classmethod
    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
        source_file = get_source_file(source_files, tests_root, manifest, path)
        url, references, extras = obj
        return cls(source_file,
                   url,
                   references,
                   url_base=manifest.url_base,
                   timeout=extras.get("timeout"),
                   viewport_size=extras.get("viewport_size"),
                   dpi=extras.get("dpi"),
                   manifest=manifest)
    def to_RefTest(self):
        # Reinterpret this node as a top-level RefTest, sharing state.
        if type(self) == RefTest:
            return self
        rv = RefTest.__new__(RefTest)
        rv.__dict__.update(self.__dict__)
        return rv
    def to_RefTestNode(self):
        # Reinterpret as a plain reference node, sharing state.
        if type(self) == RefTestNode:
            return self
        rv = RefTestNode.__new__(RefTestNode)
        rv.__dict__.update(self.__dict__)
        return rv
class RefTest(RefTestNode):
    # Same data as RefTestNode; the distinct item_type marks it as a
    # top-level reftest entry (see RefTestNode.to_RefTest).
    item_type = "reftest"
class ManualTest(URLManifestItem):
    # A test requiring manual (human) execution.
    item_type = "manual"
class ConformanceCheckerTest(URLManifestItem):
    # A test run through an external conformance checker.
    item_type = "conformancechecker"
class VisualTest(URLManifestItem):
    # A test verified by visual inspection of the rendering.
    item_type = "visual"
class Stub(URLManifestItem):
    # A placeholder test file not yet implemented.
    item_type = "stub"
class WebdriverSpecTest(URLManifestItem):
    """A WebDriver specification test with an optional timeout."""
    item_type = "wdspec"
    def __init__(self, source_file, url, url_base="/", timeout=None, manifest=None):
        URLManifestItem.__init__(self, source_file, url, url_base=url_base, manifest=manifest)
        # NOTE(review): unlike TestharnessTest, the timeout here is not
        # included in meta_key() nor serialized by to_json() — confirm
        # whether that is intentional.
        self.timeout = timeout
class SupportFile(ManifestItem):
    """A non-test support file, identified by its path rather than a URL."""
    item_type = "support"
    @property
    def id(self):
        return self.source_file.rel_path
| mpl-2.0 |
royosherove/bitcoinxt | qa/rpc-tests/txn_doublespend.py | 101 | 6478 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from decimal import Decimal
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
    """Verify wallet accounting when a signed double-spend replaces two
    conflicting wallet transactions after a network split is healed.
    """
    def add_options(self, parser):
        """Add --mineblock to optionally confirm the spends before the conflict."""
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)
    def run_test(self):
        """Create two spends and a conflicting raw double-spend; mine the
        double-spend on the other network half, reconnect, and check that
        balances and confirmation counts reflect the conflict.
        """
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # First: use raw transaction API to send 1240 BTC to node1_address,
        # but don't broadcast:
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 1240
        # 1248 = 1219 + 29 (total value of the two inputs being spent).
        outputs[change_address] = 1248 - 1240 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two spends using 1 50 BTC coin each
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus 40, minus 20, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1) # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -1)
        assert_equal(tx2["confirmations"], -1)
        # Node0's total balance should be starting balance, plus 100BTC for
        # two more matured blocks, minus 1240 for the double-spend, plus fees (which are
        # negative):
        expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                                                   -1219
                                                   - 29
                                                   -1240
                                                   + 100
                                                   + fund_foo_tx["fee"]
                                                   + fund_bar_tx["fee"]
                                                   + doublespend_fee)
        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 1240)
# Run the double-spend accounting test when executed directly.
if __name__ == '__main__':
    TxnMallTest().main()
| mit |
andrewyoung1991/abjad | abjad/tools/agenttools/IterationAgent.py | 1 | 48284 | # -*- encoding: utf-8 -*-
from __future__ import print_function
import collections
from abjad.tools import abctools
from abjad.tools import durationtools
from abjad.tools import scoretools
from abjad.tools import sequencetools
from abjad.tools import spannertools
from abjad.tools.topleveltools import inspect_
from abjad.tools.topleveltools import iterate
class IterationAgent(abctools.AbjadObject):
r'''A wrapper around the Abjad iteration methods.
.. container:: example
::
>>> staff = Staff("c'4 e'4 d'4 f'4")
>>> show(staff) # doctest: +SKIP
::
>>> iterate(staff[2:])
IterationAgent(client=SliceSelection(Note("d'4"), Note("f'4")))
'''
### CLASS VARIABLES ###
__slots__ = (
'_client',
)
### INITIALIZER ###
    def __init__(self, client=None):
        # `client` is the component or selection whose contents the
        # agent's iteration methods traverse.
        self._client = client
### PUBLIC PROPERTIES ###
    @property
    def client(self):
        r'''Client of iteration agent.
        The component or selection over which the iteration methods operate.
        Returns selection.
        '''
        return self._client
### PUBLIC METHODS ###
def by_class(
self,
prototype=None,
reverse=False,
start=0,
stop=None,
):
r'''Iterate components forward in `expr`.
::
>>> staff = Staff()
>>> staff.append(Measure((2, 8), "c'8 d'8"))
>>> staff.append(Measure((2, 8), "e'8 f'8"))
>>> staff.append(Measure((2, 8), "g'8 a'8"))
.. doctest::
>>> print(format(staff))
\new Staff {
{
\time 2/8
c'8
d'8
}
{
e'8
f'8
}
{
g'8
a'8
}
}
::
>>> for note in iterate(staff).by_class(Note):
... note
...
Note("c'8")
Note("d'8")
Note("e'8")
Note("f'8")
Note("g'8")
Note("a'8")
Use optional `start` and `stop` keyword parameters to control
start and stop indices of iteration:
::
>>> for note in iterate(staff).by_class(
... Note, start=0, stop=3):
... note
...
Note("c'8")
Note("d'8")
Note("e'8")
::
>>> for note in iterate(staff).by_class(
... Note, start=2, stop=4):
... note
...
Note("e'8")
Note("f'8")
Yield right-to-left notes in `expr`:
::
>>> staff = Staff()
>>> staff.append(Measure((2, 8), "c'8 d'8"))
>>> staff.append(Measure((2, 8), "e'8 f'8"))
>>> staff.append(Measure((2, 8), "g'8 a'8"))
.. doctest::
>>> print(format(staff))
\new Staff {
{
\time 2/8
c'8
d'8
}
{
e'8
f'8
}
{
g'8
a'8
}
}
::
>>> for note in iterate(staff).by_class(
... Note, reverse=True):
... note
...
Note("a'8")
Note("g'8")
Note("f'8")
Note("e'8")
Note("d'8")
Note("c'8")
Use optional `start` and `stop` keyword parameters to control
indices of iteration:
::
>>> for note in iterate(staff).by_class(
... Note, reverse=True, start=3):
... note
...
Note("e'8")
Note("d'8")
Note("c'8")
::
>>> for note in iterate(staff).by_class(
... Note, reverse=True, start=0, stop=3):
... note
...
Note("a'8")
Note("g'8")
Note("f'8")
::
>>> for note in iterate(staff).by_class(
... Note, reverse=True, start=2, stop=4):
... note
...
Note("f'8")
Note("e'8")
Iterates across different logical voices.
Returns generator.
'''
prototype = prototype or scoretools.Component
def component_iterator(expr, component_class, reverse=False):
if isinstance(expr, component_class):
yield expr
if isinstance(expr, (list, tuple, spannertools.Spanner)) or \
hasattr(expr, '_music'):
if hasattr(expr, '_music'):
expr = expr._music
if reverse:
expr = reversed(expr)
for component in expr:
for x in component_iterator(
component, component_class, reverse=reverse):
yield x
def subrange(iter, start=0, stop=None):
# if start<0, then 'stop-start' gives a funny result
# don not have to check stop>=start
# because range(stop-start) already handles that
assert 0 <= start
try:
# skip the first few elements, up to 'start' of them:
for i in range(start):
# no yield to swallow the results
next(iter)
# now generate (stop-start) elements
# (or all elements if stop is none)
if stop is None:
for x in iter:
yield x
else:
for i in range(stop - start):
yield next(iter)
except StopIteration:
# this happens if we exhaust the list before
# we generate a total of 'stop' elements
pass
return subrange(
component_iterator(
self._client,
prototype,
reverse=reverse),
start,
stop,
)
def by_components_and_grace_containers(self, prototype=None):
    r'''Iterate components of `prototype` forward in client, descending
    into grace containers.

    Grace leaves attached before a main leaf are yielded before that
    leaf; after-grace leaves are yielded immediately after it, so the
    overall order matches sounding order.

    Defaults `prototype` to ``scoretools.Leaf``.

    Returns generator.
    '''
    prototype = prototype or scoretools.Leaf
    # NOTE(review): this attribute access assumes the client is a
    # component carrying a _grace slot; a bare list/tuple client would
    # raise AttributeError here before reaching the list/tuple branch
    # below -- confirm intended client types.
    if self._client._grace is not None:
        # Yield 'grace' music first, then the carrier component itself,
        # then any 'after'-grace music.
        for component in self._client._grace:
            for x in iterate(component).by_components_and_grace_containers(
                prototype,
                ):
                yield x
        if isinstance(self._client, prototype):
            yield self._client
        if self._client._after_grace is not None:
            for component in self._client._after_grace:
                for x in iterate(component).by_components_and_grace_containers(
                    prototype,
                    ):
                    yield x
    elif isinstance(self._client, prototype):
        # No grace music attached: yield the matching client by itself.
        yield self._client
    # Recurse into sequence clients ...
    if isinstance(self._client, (list, tuple)):
        for component in self._client:
            for x in iterate(component).by_components_and_grace_containers(
                prototype,
                ):
                yield x
    # ... and into container-like clients (anything exposing _music).
    if hasattr(self._client, '_music'):
        for component in self._client._music:
            for x in iterate(component).by_components_and_grace_containers(
                prototype,
                ):
                yield x
def by_leaf_pair(self):
    r'''Iterate leaf pairs left-to-right and top-to-bottom in client.

    For every adjacent pair of vertical moments, yields (a) all
    unordered pairs of leaves starting together in the first moment,
    then (b) all cross pairs between the first moment's sounding leaves
    and the leaves starting in the second moment.  After the last
    moment pair, the trailing ``else`` clause emits the unordered pairs
    of the final moment's starting leaves.

    Returns generator.
    '''
    vertical_moments = self.by_vertical_moment()
    for moment_1, moment_2 in \
        sequencetools.iterate_sequence_nwise(vertical_moments):
        # Simultaneities: leaves attacking together in moment_1.
        for pair in sequencetools.yield_all_unordered_pairs_of_sequence(
            moment_1.start_leaves):
            yield pair
        # Overlaps: anything sounding in moment_1 against new attacks
        # in moment_2.
        pairs = sequencetools.yield_all_pairs_between_sequences(
            moment_1.leaves, moment_2.start_leaves)
        for pair in pairs:
            yield pair
    else:
        # for/else: runs once after the loop completes normally;
        # moment_2 still holds the final vertical moment, so this
        # emits the last moment's simultaneous attacks.
        # NOTE(review): if there are fewer than two vertical moments
        # the loop body never runs and moment_2 is unbound here,
        # raising NameError -- confirm callers never hit that case.
        for pair in sequencetools.yield_all_unordered_pairs_of_sequence(
            moment_2.start_leaves):
            yield pair
def by_logical_tie(
    self,
    nontrivial=False,
    pitched=False,
    reverse=False,
    ):
    r'''Iterate logical ties in client.

    Yields each logical tie exactly once, anchored at the tie's last
    leaf when iterating forward and at its first leaf when iterating
    backward.  Set `pitched` to true to restrict iteration to notes and
    chords; set `nontrivial` to true to skip single-leaf logical ties.

    Returns generator.
    '''
    nontrivial = bool(nontrivial)
    if pitched:
        prototype = (scoretools.Chord, scoretools.Note)
    else:
        prototype = scoretools.Leaf
    for leaf in self.by_class(prototype, reverse=reverse):
        spanners = leaf._get_spanners(spannertools.Tie)
        if spanners:
            tie = tuple(spanners)[0]
            # Emit a tie only at its boundary leaf so each logical tie
            # appears once per traversal direction.
            if reverse:
                at_boundary = tie._is_my_first_leaf(leaf)
            else:
                at_boundary = tie._is_my_last_leaf(leaf)
            if not at_boundary:
                continue
        logical_tie = leaf._get_logical_tie()
        if nontrivial and logical_tie.is_trivial:
            continue
        yield logical_tie
def by_logical_voice(
    self,
    component_class,
    logical_voice,
    reverse=False,
    ):
    r'''Yield instances of `component_class` in client belonging to
    `logical_voice`, left-to-right (or right-to-left when `reverse`
    is true).

    Returns generator.
    '''
    # The client itself counts when its class and logical voice match.
    if (isinstance(self._client, component_class) and
        self._client._get_parentage().logical_voice == logical_voice):
        yield self._client
    # Collect every child sequence the client exposes; a client may be
    # a raw sequence, a container with _music, or neither.
    child_sequences = []
    if isinstance(self._client, (list, tuple)):
        child_sequences.append(self._client)
    if hasattr(self._client, '_music'):
        child_sequences.append(self._client._music)
    for sequence in child_sequences:
        ordered = reversed(sequence) if reverse else sequence
        for child in ordered:
            for match in iterate(child).by_logical_voice(
                component_class,
                logical_voice,
                reverse=reverse,
                ):
                yield match
def by_logical_voice_from_component(
    self,
    component_class=None,
    reverse=False,
    ):
    r'''Iterate the client's logical voice forward (or backward when
    `reverse` is true) starting from the client itself, yielding
    instances of `component_class`.

    Defaults `component_class` to ``scoretools.Component`` so that all
    components in the logical voice are yielded.

    Returns generator.
    '''
    if component_class is None:
        component_class = scoretools.Component
    # Remember which logical voice the client lives in.
    target_signature = self._client._get_parentage().logical_voice
    traversal_direction = Right if reverse else Left
    # Depth-first with capped=False crawls UP out of the client's
    # parentage, so later sibling music is reached too.
    for node in iterate(self._client).depth_first(
        capped=False,
        direction=traversal_direction,
        ):
        if not isinstance(node, component_class):
            continue
        if node._get_parentage().logical_voice == target_signature:
            yield node
def by_run(self, classes):
    r'''Iterate runs in client.

    Groups the client's items by exact type and yields each maximal run
    of consecutive items whose type matches `classes` as one tuple.
    `classes` may be a single class or a sequence of classes.

    Returns generator.
    '''
    from abjad.tools import selectiontools
    if not isinstance(classes, collections.Sequence):
        classes = (classes,)
    match_classes = tuple(classes)
    selection = selectiontools.SliceSelection(self._client)
    run = ()
    for group in selection.group_by(type):
        if isinstance(group[0], match_classes):
            # Extend the current run across type boundaries within
            # the matching classes (e.g. Note then Chord).
            run = run + group
        elif run:
            yield run
            run = ()
    # Flush a run left open at the end of the client.
    if run:
        yield run
def by_semantic_voice(
    self,
    reverse=False,
    start=0,
    stop=None,
    ):
    r'''Iterate semantic voices in client, skipping voices marked
    nonsemantic (e.g. hidden time-signature voices).

    `reverse`, `start` and `stop` are forwarded to ``by_class``.

    Returns generator.
    '''
    voices = self.by_class(
        scoretools.Voice,
        reverse=reverse,
        start=start,
        stop=stop,
        )
    for voice in voices:
        if voice.is_nonsemantic:
            continue
        yield voice
def by_spanner(
    self,
    prototype=None,
    reverse=False,
    ):
    r'''Iterate spanners attached anywhere in client.

    Each spanner is yielded exactly once, ordered first by the
    component traversal (`reverse` controls its direction), then by
    spanner class name and timespan among spanners on the same
    component.

    Returns generator.
    '''
    def _sort_key(spanner):
        return (
            type(spanner).__name__,
            inspect_(spanner).get_timespan(),
            )
    seen = set()
    for component in self.by_class(reverse=reverse):
        attached = inspect_(component).get_spanners(prototype=prototype)
        for spanner in sorted(attached, key=_sort_key):
            if spanner in seen:
                continue
            seen.add(spanner)
            yield spanner
def by_timeline(
    self,
    component_class=None,
    reverse=False,
    ):
    r'''Iterate client in timeline order: components are yielded by
    increasing start offset (decreasing stop offset when `reverse` is
    true), with score index breaking ties between simultaneous staves.

    Defaults `component_class` to ``scoretools.Leaf``.

    Returns generator.

    .. todo:: Optimize to avoid behind-the-scenes full-score traversal.
    '''
    if component_class is None:
        component_class = scoretools.Leaf
    # Seed the worklist with the client (or its items if it is already
    # a sequence of components).
    if isinstance(self.client, scoretools.Component):
        components = [self.client]
    else:
        components = list(self.client)
    if not reverse:
        # Forward sweep: each outer pass handles exactly the components
        # attacking at the earliest pending start offset.
        while components:
            current_start_offset = min(
                _._get_timespan().start_offset
                for _ in components
                )
            # Sort descending by score index so that pop() processes
            # components top-to-bottom within a simultaneity.
            components.sort(
                key=lambda x: x._get_parentage().score_index,
                reverse=True,
                )
            components_to_process = components[:]
            components = []
            while components_to_process:
                component = components_to_process.pop()
                start_offset = component._get_timespan().start_offset
                if current_start_offset < start_offset:
                    # Starts later than this pass: defer to next pass.
                    components.append(component)
                    continue
                if isinstance(component, component_class):
                    yield component
                # Queue the next sibling for a later pass.
                sibling = component._get_sibling(1)
                if sibling is not None:
                    components.append(sibling)
                if not isinstance(component, scoretools.Container):
                    continue
                if not len(component):
                    continue
                if not component.is_simultaneous:
                    # Sequential container: descend into first child only.
                    components_to_process.append(component[0])
                else:
                    # Simultaneous container: descend into all children;
                    # reversed so pop() keeps top-to-bottom order.
                    components_to_process.extend(reversed(component))
    else:
        # Backward sweep: mirror of the forward case, keyed on the
        # latest pending stop offset.
        while components:
            current_stop_offset = max(
                _._get_timespan().stop_offset
                for _ in components
                )
            components.sort(
                key=lambda x: x._get_parentage().score_index,
                reverse=True,
                )
            components_to_process = components[:]
            components = []
            while components_to_process:
                component = components_to_process.pop()
                stop_offset = component._get_timespan().stop_offset
                if stop_offset < current_stop_offset:
                    # Stops earlier than this pass: defer to next pass.
                    components.insert(0, component)
                    continue
                if isinstance(component, component_class):
                    yield component
                # Queue the previous sibling for a later pass.
                sibling = component._get_sibling(-1)
                if sibling is not None:
                    components.insert(0, sibling)
                if not isinstance(component, scoretools.Container):
                    continue
                if not len(component):
                    continue
                if not component.is_simultaneous:
                    # Sequential container: descend into last child only.
                    components_to_process.append(component[-1])
                else:
                    components_to_process.extend(reversed(component))
def by_timeline_from_component(
    self,
    component_class=None,
    reverse=False,
    ):
    r'''Iterate timeline of the client's whole score, starting at the
    client itself and continuing forward (or backward when `reverse`
    is true).

    Defaults `component_class` to ``scoretools.Leaf``.

    Returns generator.

    .. todo:: Optimize to avoid behind-the-scenes full-score traversal.
    '''
    assert isinstance(self._client, scoretools.Component)
    if component_class is None:
        component_class = scoretools.Leaf
    # Traverse the full score timeline, then suppress everything that
    # precedes the client.
    root = self._client._get_parentage().root
    timeline = iterate(root).by_timeline(
        component_class=component_class,
        reverse=reverse,
        )
    started = False
    for component in timeline:
        if not started:
            if component is not self._client:
                continue
            started = True
        yield component
def by_topmost_logical_ties_and_components(self):
    r'''Iterate topmost logical ties and components forward in client.

    Leaves at the client's top level are grouped into logical ties
    (yielded once, at each tie's last leaf); non-leaf components at the
    top level are yielded as-is.

    Raises ValueError when the client is a single leaf belonging to a
    logical tie of more than one leaf (the tie would overlap the
    client), or when the client is not iterable.

    Returns generator.
    '''
    from abjad.tools import selectiontools
    prototype = (spannertools.Tie,)
    if isinstance(self._client, scoretools.Leaf):
        logical_tie = self._client._get_logical_tie()
        if len(logical_tie) == 1:
            yield logical_tie
        else:
            # BUG FIX: the previous message ('can not have only one
            # leaf in logical tie.') stated the opposite of the actual
            # constraint; this branch runs when the tie has MORE than
            # one leaf.
            message = 'logical tie must contain only one leaf.'
            raise ValueError(message)
    elif isinstance(
        self._client, (
            collections.Sequence,
            scoretools.Container,
            selectiontools.SliceSelection,
            )):
        for component in self._client:
            if isinstance(component, scoretools.Leaf):
                tie_spanners = component._get_spanners(prototype)
                # Yield each logical tie once, at its final leaf.
                if not tie_spanners or \
                    tuple(tie_spanners)[0]._is_my_last_leaf(component):
                    yield component._get_logical_tie()
            elif isinstance(component, scoretools.Container):
                yield component
    else:
        message = 'input must be iterable: {!r}.'
        message = message.format(self._client)
        raise ValueError(message)
def by_vertical_moment(
    self,
    reverse=False,
    ):
    r'''Iterate vertical moments in client, forward by default or
    backward when `reverse` is true.

    A vertical moment collects every component sounding at a given
    offset, sorted by score index.

    Returns generator.
    '''
    from abjad.tools import selectiontools
    def _buffer_components_starting_with(component, buffer, stop_offsets):
        # Add `component` (and, recursively, whatever of its music
        # starts at the same moment) to the active buffer.
        buffer.append(component)
        stop_offsets.append(component._get_timespan().stop_offset)
        if isinstance(component, scoretools.Container):
            if component.is_simultaneous:
                # All children of a simultaneous container start now.
                for x in component:
                    _buffer_components_starting_with(
                        x, buffer, stop_offsets)
            else:
                # Only the first child of a sequential container does.
                if component:
                    _buffer_components_starting_with(
                        component[0], buffer, stop_offsets)
    def _iterate_vertical_moments_forward_in_expr(expr):
        governors = (expr,)
        current_offset, stop_offsets, buffer = \
            durationtools.Offset(0), [], []
        _buffer_components_starting_with(expr, buffer, stop_offsets)
        while buffer:
            vertical_moment = selectiontools.VerticalMoment()
            offset = durationtools.Offset(current_offset)
            components = list(buffer)
            components.sort(key=lambda x: x._get_parentage().score_index)
            # Populate the moment's private state directly.
            vertical_moment._offset = offset
            vertical_moment._governors = governors
            vertical_moment._components = components
            yield vertical_moment
            # Advance to the earliest stop offset among sounding
            # components; that is where the buffer next changes.
            current_offset, stop_offsets = min(stop_offsets), []
            _update_buffer(current_offset, buffer, stop_offsets)
    def _next_in_parent(component):
        # Return the component's next sibling in a sequential parent;
        # signal end-of-voice by raising StopIteration.
        from abjad.tools import selectiontools
        if not isinstance(component, scoretools.Component):
            raise TypeError
        selection = selectiontools.SliceSelection(component)
        parent, start, stop = \
            selection._get_parent_and_start_stop_indices()
        assert start == stop
        if parent is None:
            raise StopIteration
        # can not advance within simultaneous parent
        if parent.is_simultaneous:
            raise StopIteration
        try:
            return parent[start + 1]
        except IndexError:
            raise StopIteration
    def _update_buffer(current_offset, buffer, stop_offsets):
        # Drop components that have stopped sounding and replace them
        # with whatever starts next in their parent.
        for component in buffer[:]:
            if component._get_timespan().stop_offset <= current_offset:
                buffer.remove(component)
                try:
                    next_component = _next_in_parent(component)
                    _buffer_components_starting_with(
                        next_component, buffer, stop_offsets)
                except StopIteration:
                    pass
            else:
                stop_offsets.append(component._get_timespan().stop_offset)
    if not reverse:
        for x in _iterate_vertical_moments_forward_in_expr(self._client):
            yield x
    else:
        # Backward iteration: collect every distinct start offset in
        # the client, then re-query each moment in reverse order.
        moments_in_governor = []
        for component in self.by_class():
            offset = component._get_timespan().start_offset
            if offset not in moments_in_governor:
                moments_in_governor.append(offset)
        moments_in_governor.sort()
        for moment_in_governor in reversed(moments_in_governor):
            yield self._client._get_vertical_moment_at(moment_in_governor)
def depth_first(
    self,
    capped=True,
    direction=Left,
    forbid=None,
    unique=True,
    ):
    '''Iterate components depth-first from client.

    With `capped` true, traversal stays inside the client; with
    `capped` false, traversal crawls up into the client's parentage.
    `direction` chooses left-to-right (``Left``) or right-to-left
    traversal.  `forbid` prevents descent into matching nodes
    (``'simultaneous'`` or a class).  With `unique` true, containers
    are yielded once (on first visit) rather than on every re-entry.

    .. todo:: Add usage examples.
    '''
    def _next_node_depth_first(component, total):
        r'''If client has unvisited music, return next unvisited node in
        client's music.

        If client has no unvisited music and has a parent, return
        client's parent.

        If client has no unvisited music and no parent, return None.
        '''
        client = component
        if hasattr(client, '_music') and \
            0 < len(client) and \
            total < len(client):
            # Descend: `total` children already visited.
            return client[total], 0
        else:
            parent = client._parent
            if parent is not None:
                # Ascend; rank records how many of parent's children
                # are now done.
                return parent, parent.index(client) + 1
            else:
                return None, None
    def _previous_node_depth_first(component, total=0):
        r'''Mirror of _next_node_depth_first for right-to-left
        traversal.
        '''
        client = component
        if hasattr(client, '_music') and \
            0 < len(client) and \
            total < len(client):
            return client[len(client) - 1 - total], 0
        else:
            parent = client._parent
            if parent is not None:
                return parent, len(parent) - parent.index(client)
            else:
                return None, None
    def _handle_forbidden_node(node, queue):
        # Skip a forbidden node's subtree by jumping straight to its
        # parent (or terminating at the root).
        node_parent = node._parent
        if node_parent is not None:
            rank = node_parent.index(node) + 1
            node = node_parent
        else:
            node, rank = None, None
        queue.pop()
        return node, rank
    def _advance_node_depth_first(node, rank, direction):
        # TODO: remove 'left'
        if direction in ('left', Left):
            node, rank = _next_node_depth_first(node, rank)
        else:
            node, rank = _previous_node_depth_first(node, rank)
        return node, rank
    def _is_node_forbidden(node, forbid):
        if forbid is None:
            return False
        elif forbid == 'simultaneous':
            return getattr(node, 'is_simultaneous', False)
        else:
            return isinstance(node, forbid)
    def _find_yield(node, rank, queue, unique):
        # Decide whether the current node should be yielded; `queue`
        # tracks the stack of containers currently being visited.
        if hasattr(node, '_music'):
            try:
                visited = node is queue[-1]
            except IndexError:
                visited = False
            if not visited or unique is not True:
                queue.append(node)
                return node
            elif rank == len(node):
                # All children done: leave the container silently.
                queue.pop()
                return None
        else:
            # Leaves are always yielded.
            return node
    assert isinstance(self._client, scoretools.Component)
    component = self._client
    client_parent, node, rank = component._parent, component, 0
    queue = collections.deque([])
    while node is not None and not (capped and node is client_parent):
        result = _find_yield(node, rank, queue, unique)
        if result is not None:
            yield result
        if _is_node_forbidden(node, forbid):
            node, rank = _handle_forbidden_node(node, queue)
        else:
            node, rank = _advance_node_depth_first(node, rank, direction)
    queue.clear()
DevangS/CoralNet | images/migrations/0011_view_and_edit_perms_for_sources.py | 1 | 18414 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
depends_on = (
("guardian", "0005_auto__chg_field_groupobjectpermission_object_pk__chg_field_userobjectp"),
)
def forwards(self, orm):
    """Create the Source View/Edit permission types (if missing) and
    grant both to every user who currently holds Source Admin."""
    print "-----"
    print "This migration will create the Source View and Edit"
    print "permission types, if they don't already exist in the"
    print "auth system."
    print ""
    try:
        ct = orm['contenttypes.ContentType'].objects.get(model='source', app_label='images') # model must be lowercase
    except orm['contenttypes.ContentType'].DoesNotExist:
        # No ContentType yet means the Source model is being created in
        # this same migration run; permissions will be auto-created
        # with the model, so there is nothing to do here.
        print "Didn't find a ContentType for the Source model, so we can deduce"
        print "that the Source model has just been created during this series"
        print "of migration runs. The Admin, Edit, and View permissions"
        print "should be auto-created later along with the Source"
        print "model. No need to do anything in this migration."
    else:
        # get_or_create keeps the migration idempotent on re-runs.
        view_perm, created = orm['auth.permission'].objects.get_or_create(
            content_type=ct, codename=u'source_view', defaults={'name': u'View'})
        if created:
            print "Source View permission type created."
        else:
            print "Source View permission type already exists."
        edit_perm, created = orm['auth.permission'].objects.get_or_create(
            content_type=ct, codename=u'source_edit', defaults={'name': u'Edit'})
        if created:
            print "Source Edit permission type created."
        else:
            print "Source Edit permission type already exists."
        print "Next, we'll grant View and Edit permissions for all current Source Admins."
        print "NOTE: If you have any orphaned object permissions (permission to a source that doesn't exist), this might fail. To clean up those orphaned permissions, try:"
        print "./manage.py clean_orphan_obj_perms"
        admin_perm = orm['auth.permission'].objects.get(content_type=ct, codename=u'source_admin')
        # Every per-object Admin grant also gets View and Edit grants.
        for p in orm['guardian.userobjectpermission'].objects.filter(permission=admin_perm):
            source = orm['images.source'].objects.get(pk=p.object_pk)
            view_userobjperm, created = orm['guardian.userobjectpermission'].objects.get_or_create(
                permission=view_perm, object_pk=p.object_pk, user=p.user, content_type=ct)
            if created:
                print "User %s has been granted View permission for Source %s." % (p.user.username, source.name)
            else:
                print "User %s already has View permission for Source %s." % (p.user.username, source.name)
            edit_userobjperm, created = orm['guardian.userobjectpermission'].objects.get_or_create(
                permission=edit_perm, object_pk=p.object_pk, user=p.user, content_type=ct)
            if created:
                print "User %s has been granted Edit permission for Source %s." % (p.user.username, source.name)
            else:
                print "User %s already has Edit permission for Source %s." % (p.user.username, source.name)
        print "Done granting permissions."
    print "-----"
def backwards(self, orm):
print "-----"
print "NOTE: This backwards migration will remove all instances of the View and Edit Source permissions in the database."
ct = orm['contenttypes.ContentType'].objects.get(model='source', app_label='images') # model must be lowercase
view_perm = orm['auth.permission'].objects.get(content_type=ct, codename=u'source_view')
edit_perm = orm['auth.permission'].objects.get(content_type=ct, codename=u'source_edit')
for p in orm['guardian.userobjectpermission'].objects.filter(permission=view_perm, content_type=ct):
source = orm['images.source'].objects.get(pk=p.object_pk)
p.delete()
print "User %s's View permission for Source %s has been removed." % (p.user.username, source.name)
for p in orm['guardian.userobjectpermission'].objects.filter(permission=edit_perm, content_type=ct):
source = orm['images.source'].objects.get(pk=p.object_pk)
p.delete()
print "User %s's Edit permission for Source %s has been removed." % (p.user.username, source.name)
view_perm.delete()
print "Removed the View Source permission type."
edit_perm.delete()
print "Removed the Edit Source permission type."
print "Done."
print "-----"
models = {
'annotations.label': {
'Meta': {'object_name': 'Label'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'})
},
'annotations.labelgroup': {
'Meta': {'object_name': 'LabelGroup'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'annotations.labelset': {
'Meta': {'object_name': 'LabelSet'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['annotations.Label']", 'symmetrical': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'images.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Metadata']"}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'original_height': ('django.db.models.fields.IntegerField', [], {}),
'original_width': ('django.db.models.fields.IntegerField', [], {}),
'point_generation_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'images.metadata': {
'Meta': {'object_name': 'Metadata'},
'balance': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'camera': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'depth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'framing': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'group1_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group2_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group3_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group4_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group5_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group6_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group7_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'photo_date': ('django.db.models.fields.DateField', [], {}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'pixel_cm_ratio': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'strobes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'value1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value1']", 'null': 'True'}),
'value2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value2']", 'null': 'True'}),
'value3': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value3']", 'null': 'True'}),
'value4': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value4']", 'null': 'True'}),
'value5': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value5']", 'null': 'True'}),
'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'images.point': {
'Meta': {'object_name': 'Point'},
'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'column': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'point_number': ('django.db.models.fields.IntegerField', [], {}),
'row': ('django.db.models.fields.IntegerField', [], {})
},
'images.source': {
'Meta': {'object_name': 'Source'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_point_generation_method': ('django.db.models.fields.CharField', [], {'default': "'m_200'", 'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'labelset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelSet']"}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
},
'images.value1': {
'Meta': {'object_name': 'Value1'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value2': {
'Meta': {'object_name': 'Value2'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value3': {
'Meta': {'object_name': 'Value3'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value4': {
'Meta': {'object_name': 'Value4'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value5': {
'Meta': {'object_name': 'Value5'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'guardian.userobjectpermission': {
'Meta': {'unique_together': "(['user', 'permission', 'content_type', 'object_pk'],)", 'object_name': 'UserObjectPermission'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Permission']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['images']
| bsd-2-clause |
andreyvit/pyjamas | library/__ie6__/pyjamas/History.py | 5 | 2645 | def init():
JS("""
// Check for existence of the history frame.
var historyFrame = $doc.getElementById('__pygwt_historyFrame');
if (!historyFrame)
return false;
// Get the initial token from the url's hash component.
var hash = $wnd.location.hash;
if (hash.length > 0)
$wnd.__historyToken = decodeURIComponent(hash.substring(1));
else
$wnd.__historyToken = '';
// Initialize the history iframe. If '__historyToken' already exists, then
// we're probably backing into the app, so _don't_ set the iframe's location.
var tokenElement = null;
if (historyFrame.contentWindow) {
var doc = historyFrame.contentWindow.document;
tokenElement = doc ? doc.getElementById('__historyToken') : null;
}
if (tokenElement)
$wnd.__historyToken = tokenElement.value;
else
historyFrame.src = 'history.html?' + $wnd.__historyToken;
// Expose the '__onHistoryChanged' function, which will be called by
// the history frame when it loads.
$wnd.__onHistoryChanged = function(token) {
// Change the URL and notify the application that its history frame
// is changing. Note that setting location.hash does _not_ add a history
// frame on IE, so we don't have to do a 'location.replace()'.
if (token != $wnd.__historyToken) {
$wnd.__historyToken = token;
$wnd.location.hash = encodeURIComponent(token);
// TODO - move init back into History
// this.onHistoryChanged(token);
pyjamas.History.onHistoryChanged(token);
}
};
// This is the URL check timer. It detects when an unexpected change
// occurs in the document's URL (e.g. when the user enters one manually
// or selects a 'favorite', but only the #hash part changes). When this
// occurs, we _must_ reload the page. This is because IE has a really
// nasty bug that totally mangles its history stack and causes the location
// bar in the UI to stop working under these circumstances.
var urlChecker = function() {
var hash = $wnd.location.hash;
if (hash.length > 0) {
var token = decodeURIComponent(hash.substring(1));
if ($wnd.__historyToken && (token != $wnd.__historyToken))
$wnd.location.reload();
}
$wnd.setTimeout(urlChecker, 250);
};
urlChecker();
return true;
""")
def newItem(historyToken):
JS("""
var iframe = $doc.getElementById('__pygwt_historyFrame');
iframe.contentWindow.location.href = 'history.html?' + historyToken;
""")
| apache-2.0 |
sserrot/champion_relationships | venv/Lib/site-packages/pip/_internal/models/format_control.py | 11 | 2823 | from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.exceptions import CommandError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Set, FrozenSet
class FormatControl(object):
"""Helper for managing formats from which a package can be installed.
"""
__slots__ = ["no_binary", "only_binary"]
def __init__(self, no_binary=None, only_binary=None):
# type: (Optional[Set[str]], Optional[Set[str]]) -> None
if no_binary is None:
no_binary = set()
if only_binary is None:
only_binary = set()
self.no_binary = no_binary
self.only_binary = only_binary
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, self.__class__):
return NotImplemented
if self.__slots__ != other.__slots__:
return False
return all(
getattr(self, k) == getattr(other, k)
for k in self.__slots__
)
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
def __repr__(self):
# type: () -> str
return "{}({}, {})".format(
self.__class__.__name__,
self.no_binary,
self.only_binary
)
@staticmethod
def handle_mutual_excludes(value, target, other):
# type: (str, Set[str], Set[str]) -> None
if value.startswith('-'):
raise CommandError(
"--no-binary / --only-binary option requires 1 argument."
)
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
# Without a none, we want to discard everything as :all: covers it
if ':none:' not in new:
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
def get_allowed_formats(self, canonical_name):
# type: (str) -> FrozenSet[str]
result = {"binary", "source"}
if canonical_name in self.only_binary:
result.discard('source')
elif canonical_name in self.no_binary:
result.discard('binary')
elif ':all:' in self.only_binary:
result.discard('source')
elif ':all:' in self.no_binary:
result.discard('binary')
return frozenset(result)
def disallow_binaries(self):
# type: () -> None
self.handle_mutual_excludes(
':all:', self.no_binary, self.only_binary,
)
| mit |
Ecogenomics/GTDBNCBI | scripts_dev/legacy_code/ncbi_ftp_update/update_genbank_from_ftp_files.py | 1 | 18935 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'update_genbank_from_ftp_files.py'
__prog_desc__ = ('Check which genomes are not present in Refseq but present in Genbank.' +
'If the genome is in Genbank, it is copied either from the ftp website for a new genome' +
' or from the previous genbank folder if it\'s an existing genome')
__author__ = 'Pierre Chaumeil'
__copyright__ = 'Copyright 2016'
__credits__ = ['Pierre Chaumeil']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Pierre Chaumeil'
__email__ = 'p.chaumeil@qfab.org'
__status__ = 'Development'
import os
import sys
import collections
import shutil
import glob
import gzip
import hashlib
import datetime
import argparse
class UpdateGenbankFolder(object):
def __init__(self, new_genbank_folder):
self.domains = ["archaea", "bacteria"]
self.fastaExts = ("_genomic.fna.gz", "_protein.faa.gz")
self.extensions = ("_feature_table.txt.gz", "_genomic.gbff.gz",
"_genomic.gff.gz", "_protein.gpff.gz", "_wgsmaster.gbff.gz")
self.reports = ("_assembly_report.txt", "_assembly_stats.txt")
self.allExts = self.fastaExts + self.extensions + self.reports
self.allbutFasta = self.extensions + self.reports
self.log = open(os.path.join(new_genbank_folder, "extra_gbk_report_gcf.log"), "w")
self.select_gca = open(os.path.join(new_genbank_folder, "gca_selection.log"), "w")
def runComparison(self, ftp_genbank, new_genbank, ftp_genbank_genome_dirs, old_genbank_genome_dirs, new_refseq_genome_dirs):
'''
runComparison function is walking across all directories recursively
only folder containing latest_assembly_versions but not containing _assembly_structure
are of interest
'''
for domain in self.domains:
# old_dict lists all records from the previous gtdb update
with open(old_genbank_genome_dirs, 'r') as old_file:
old_dict = {old_line.split("\t")[0]: old_line.split("\t")[1].strip()
for old_line in old_file if "/{0}/".format(domain) in old_line.split("\t")[1]}
listGCA = self.parseAssemblySummary(
domain, ftp_genbank, new_refseq_genome_dirs)
# new dict lists all records from FTP which are in new_list
with open(ftp_genbank_genome_dirs, 'r') as new_genome_dirs_file:
new_dict = {new_line.split("\t")[0]: new_line.split("\t")[1].strip()
for new_line in new_genome_dirs_file if "/{0}/".format(domain) in new_line.split("\t")[1] and new_line.split("\t")[0] in listGCA}
# new genomes in FTP
added_dict = {added_key: new_dict[added_key] for added_key in list(
set(new_dict.keys()) - set(old_dict.keys()))}
print "{0} genomes to add for {1}".format(len(added_dict), domain)
self.addGenomes(added_dict, ftp_genbank, new_genbank, domain)
# delete genomes from the Database
removed_dict = {removed_key: old_dict[removed_key] for removed_key in list(
set(old_dict.keys()) - set(new_dict.keys()))}
print "{0} genomes to remove for {1}".format(len(removed_dict), domain)
self.removeGenomes(removed_dict, domain)
intersect_list = list(
set(old_dict.keys()).intersection(set(new_dict.keys())))
print "{0} genomes to compare for {1}".format(len(intersect_list), domain)
self.compareGenomes(
intersect_list, old_dict, new_dict, ftp_genbank, new_genbank, domain)
self.select_gca.close()
self.log.close()
def addGenomes(self, added_dict, ftp_genbank, new_genbank, domain):
'''
addGenomes function insert new genomes in the GTDB database. New genomes are present in the FTP folder
but not in the previous version of GTDB.
:TODO: Check if the new genome is a new version of an existing genome. in that case we overwrite the previous one
and keep the same database id
This will cause a conflict with the removeGenomes function.
:param added_dict: dictionary of genomes to be added (genome_id:path to genome)
:param ftp_genbank: base directory leading the the FTP repository for refseq
:param new_genbank:base directory leading the new repository for refseq
:param domain:archaea or bacteria
'''
for gcf_record in added_dict:
target_dir = added_dict[gcf_record].replace(ftp_genbank, new_genbank).replace(
"/latest_assembly_versions", "")
shutil.copytree(added_dict[
gcf_record], target_dir, ignore=shutil.ignore_patterns("*_assembly_structure"))
self.log.write(
"{0}\t{1}\tnew\n".format(domain.upper(), gcf_record))
for compressed_file in glob.glob(target_dir + "/*.gz"):
if os.path.isdir(compressed_file) == False:
inF = gzip.open(compressed_file, 'rb')
outF = open(
self.rreplace(compressed_file, ".gz", "", 1), 'wb')
outF.write(inF.read())
inF.close()
outF.close()
os.remove(compressed_file)
def removeGenomes(self, removed_dict, domain):
'''
removeGenomes function removes all outdated genomes from the gtdb database
In addition it tracks the lists(name and owner) that have been modified while deleting those genomes
:param removed_dict: dictionary of genomes to delete
'''
for gcf_record in removed_dict:
self.log.write(
"{0}\t{1}\tremoved\n".format(domain.upper(), gcf_record))
def compareGenomes(self, intersect_list, old_dict, new_dict, ftp_genbank, new_genbank, domain):
'''
compare the genomes existing in both folders ( FTP folder and previous gtdb update).
:param intersect_list:
:param old_dict:
:param new_dict:
'''
for gcf_record in intersect_list:
gtdb_dir = old_dict.get(gcf_record)
ftp_dir = new_dict.get(gcf_record)
target_dir = ftp_dir.replace(ftp_genbank, new_genbank).replace(
"latest_assembly_versions/", "")
self.readmd5Checksum(
gtdb_dir, ftp_dir, target_dir, gcf_record, domain)
def readmd5Checksum(self, gtdb_dir, ftp_dir, target_dir, gcf_record, domain):
'''
Compare the checksum of the file listed in the checksums.txt
'''
pathftpmd5 = os.path.join(ftp_dir, "md5checksums.txt")
pathgtdbmd5 = os.path.join(gtdb_dir, "md5checksums.txt")
target_pathnewmd5 = os.path.join(target_dir, "md5checksums.txt")
status = []
ftpdict, ftpdict_fasta = self.parse_checksum(pathftpmd5)
gtdbdict, gtdbdict_fasta = self.parse_checksum(pathgtdbmd5)
# if the genomic.fna.gz or the protein.faa.gz are missing, we set this
# record as incomplete
if len(ftpdict_fasta.keys()) < 2:
status.append("incomplete")
# We copy the incomplete ones from ftp even if they miss data
shutil.copytree(
ftp_dir, target_dir,
ignore=shutil.ignore_patterns("*_assembly_structure"))
status.append("modified")
# we unzip of gz file
for compressed_file in glob.glob(target_dir + "/*.gz"):
if os.path.isdir(compressed_file) == False:
inF = gzip.open(compressed_file, 'rb')
outF = open(
self.rreplace(compressed_file, ".gz", "", 1), 'wb')
outF.write(inF.read())
inF.close()
outF.close()
os.remove(compressed_file)
return False
else:
ftp_folder = False
# check if genomic.fna.gz and protein.faa.gz are similar between
# previous gtdb and ftp
for key, value in ftpdict_fasta.iteritems():
if value != gtdbdict_fasta.get(key):
ftp_folder = True
# if one of the 2 files is different than the previous version , we
# use the ftp record over the previous gtdb one , we then need to
# re run the metadata generation
if ftp_folder:
shutil.copytree(
ftp_dir, target_dir,
ignore=shutil.ignore_patterns("*_assembly_structure"))
status.append("modified")
# we unzip of gz file
for compressed_file in glob.glob(target_dir + "/*.gz"):
if os.path.isdir(compressed_file) == False:
inF = gzip.open(compressed_file, 'rb')
outF = open(
self.rreplace(compressed_file, ".gz", "", 1), 'wb')
outF.write(inF.read())
inF.close()
outF.close()
os.remove(compressed_file)
else:
# The 2 main fasta files haven't changed so we can copy the old
# gtdb folder over
shutil.copytree(
gtdb_dir, target_dir,
ignore=shutil.ignore_patterns("*_assembly_structure"))
status.append("unmodified")
# We check if all other file of this folder are the same.
checksum_changed = False
for key, value in ftpdict.iteritems():
if value != gtdbdict.get(key):
checksum_changed = True
shutil.copy2(
os.path.join(ftp_dir, key), os.path.join(target_dir, key))
if key.endswith(".gz"):
inF = gzip.open(os.path.join(ftp_dir, key), 'rb')
try:
outF = open(
self.rreplace(os.path.join(target_dir, key), ".gz", "", 1), 'wb')
except IOError:
os.chmod(
self.rreplace(os.path.join(target_dir, key), ".gz", "", 1), 0o775)
outF = open(
self.rreplace(os.path.join(target_dir, key), ".gz", "", 1), 'wb')
outF.write(inF.read())
inF.close()
outF.close()
os.remove(os.path.join(target_dir, key))
status.append("new_metadata")
# we copy the new checksum
if checksum_changed:
try:
shutil.copy2(pathgtdbmd5, target_pathnewmd5)
except IOError:
os.chmod(target_pathnewmd5, 0o664)
shutil.copy2(pathftpmd5, target_pathnewmd5)
for report in self.reports:
target_files = glob.glob(
os.path.join(target_dir, "*" + report))
ftp_files = glob.glob(os.path.join(ftp_dir, "*" + report))
if len(target_files) == 1 and len(ftp_files) == 1:
status = self.comparesha256(
ftp_files[0], target_files[0], status)
else:
print target_files
print ftp_files
print "IT SHOULDN'T HAPPEN"
self.log.write("{0}\t{1}\t{2}\n".format(
domain.upper(), gcf_record, ';'.join([x for x in set(status)])))
# Tools
def parseAssemblySummary(self, domain, ftp_genbank, new_refseq_genome_dirs):
listGCA = []
dictGCF = self._populateGenomesDict(new_refseq_genome_dirs)
print "parsing of dictoinary is done"
with open(os.path.join(ftp_genbank, domain, "assembly_summary.txt"), "r") as sumf:
# we discard the first line
sumf.readline()
for line in sumf:
split_line = line.split("\t")
gcf_access = split_line[17]
full_gca_access = split_line[0]
latest = split_line[10]
if latest == "latest":
if not gcf_access.startswith("GCF"):
listGCA.append(full_gca_access)
self.select_gca.write("{0}\tNo GCF\n".format(
full_gca_access))
else:
# if the Refseq folder is empty, we copy the genbank
# folder
if gcf_access in dictGCF:
protein_files = glob.glob(
os.path.join(dictGCF.get(gcf_access), "*_protein.faa"))
if len(protein_files) == 0:
self.select_gca.write(
"{0} associated with {1} : {1} missed files in FTP folder\n".format(full_gca_access, gcf_access))
listGCA.append(full_gca_access)
else:
self.select_gca.write(
"{0} associated with {1} : {1} not present in FTP folder\n".format(full_gca_access, gcf_access))
listGCA.append(full_gca_access)
return listGCA
def comparesha256(self, ftp_file, target_file, status):
'''
comparesha256 compares the report file
:param ftp_file:
:param target_file:
:param status:
'''
original_checksum = hashlib.md5(
open(ftp_file, 'rb').read()).hexdigest()
gtdb_checksum = hashlib.md5(open(target_file, 'rb').read()).hexdigest()
if original_checksum != gtdb_checksum:
try:
shutil.copy2(ftp_file, target_file)
except IOError:
os.chmod(target_file, 0o664)
shutil.copy2(ftp_file, target_file)
status.append("new_metadata")
return status
def _populateGenomesDict(self, genome_dirs_file):
temp_dict = {}
with open(genome_dirs_file, "r") as list_dirs:
for line in list_dirs:
temp_dict[line.split("\t")[0]] = line.split(
"\t")[1].rstrip()
return temp_dict
def rreplace(self, s, old, new, occurrence):
'''
Instead of using the normal replace function, we need to implement our own.
Some folder are named with a .gz in the middle so we only need to replace the last .gz in the string name
:param s:
:param old:
:param new:
:param occurrence:
'''
li = s.rsplit(old, occurrence)
return new.join(li)
def parse_checksum(self, md5File):
'''
parse_checksum function parses the md5 checksum file.
It returns 2 dictionaries {file:size} : one for the fna and faa files, one for the genbank files
:param md5File:
'''
out_dict, out_dict_fasta = {}, {}
with open(md5File) as f:
for line in f:
split_line = line.rstrip().split(" ")
header = split_line[1].replace("./", "")
chksum = split_line[0]
if header.endswith(self.fastaExts):
out_dict_fasta[header] = chksum
if header.endswith(self.allbutFasta):
out_dict[header] = chksum
return (out_dict, out_dict_fasta)
if __name__ == "__main__":
print __prog_name__ + ' v' + __version__ + ': ' + __prog_desc__
print ' by ' + __author__ + ' (' + __email__ + ')' + '\n'
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--ftp_genbank_directory', dest="ftp_genbank", required=True,
help='base directory leading the the FTP repository for genbank')
parser.add_argument('--new_genbank_directory', dest="new_genbank",
required=True, help='base directory leading the new repository for genbank')
parser.add_argument('--ftp_genbank_genome_dirs_file', dest="ftp_genbank_genome_dirs", required=True,
help='metadata file listing all directories for the FTP folder (generated by ncbi_genome_dirs.py)')
parser.add_argument('--old_genbank_genome_dirs_file', dest="old_genbank_genome_dirs", required=True,
help='metadata file listing all directories from the previous NCBI update date (generated by genome_dirs.py)')
parser.add_argument('--new_refseq_genome_dirs_file', dest="new_refseq_genome_dirs", required=True,
help='metadata file listing all directories from the previous NCBI update date (generated by genome_dirs.py)')
args = parser.parse_args()
try:
update_manager = UpdateGenbankFolder(args.new_genbank)
update_manager.runComparison(
args.ftp_genbank, args.new_genbank, args.ftp_genbank_genome_dirs, args.old_genbank_genome_dirs, args.new_refseq_genome_dirs)
except SystemExit:
print "\nControlled exit resulting from an unrecoverable error or warning."
except:
print "\nUnexpected error:", sys.exc_info()[0]
raise | gpl-3.0 |
Matt-Deacalion/django | django/utils/baseconv.py | 650 | 2982 | # Copyright (c) 2010 Guilherme Gondim. All rights reserved.
# Copyright (c) 2009 Simon Willison. All rights reserved.
# Copyright (c) 2002 Drew Perttula. All rights reserved.
#
# License:
# Python Software Foundation License version 2
#
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
# ALL WARRANTIES.
#
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
# code so it may be used in proprietary projects just like prior ``baseconv``
# distributions.
#
# All trademarks referenced herein are property of their respective holders.
#
"""
Convert numbers from base 10 integers to base X strings and back again.
Sample usage::
>>> base20 = BaseConverter('0123456789abcdefghij')
>>> base20.encode(1234)
'31e'
>>> base20.decode('31e')
1234
>>> base20.encode(-1234)
'-31e'
>>> base20.decode('-31e')
-1234
>>> base11 = BaseConverter('0123456789-', sign='$')
>>> base11.encode('$1234')
'$-22'
>>> base11.decode('$-22')
'$1234'
"""
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
class BaseConverter(object):
    """Convert between base-10 integers and strings in an arbitrary base.

    ``digits`` supplies the target alphabet (its length is the base) and
    ``sign`` is the character prepended to negative values; the sign may
    not itself appear in the alphabet.
    """
    decimal_digits = '0123456789'

    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')

    def __repr__(self):
        return "<BaseConverter: base%s (%s)>" % (len(self.digits), self.digits)

    def encode(self, i):
        """Return the base-10 integer ``i`` rendered in this converter's base."""
        neg, encoded = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + encoded if neg else encoded

    def decode(self, s):
        """Return the base-10 integer encoded by the string ``s``."""
        neg, decoded = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + decoded) if neg else int(decoded)

    def convert(self, number, from_digits, to_digits, sign):
        """Re-express ``number`` from one digit alphabet into another.

        Returns a ``(negative, digits)`` pair: ``negative`` is 1 when
        ``number`` carried a leading ``sign`` character, and ``digits`` is
        the magnitude spelled with ``to_digits``.
        """
        text = str(number)
        if text[0] == sign:
            neg, text = 1, text[1:]
        else:
            neg = 0
        # Accumulate the magnitude as a plain int via positional weighting.
        src_base = len(from_digits)
        value = 0
        for ch in text:
            value = value * src_base + from_digits.index(ch)
        if value == 0:
            return neg, to_digits[0]
        # Spell the magnitude with the target alphabet, least significant
        # digit first, then reverse.
        dst_base = len(to_digits)
        out = []
        while value > 0:
            value, rem = divmod(value, dst_base)
            out.append(to_digits[rem])
        return neg, ''.join(reversed(out))
# Ready-made converters for the common alphabets defined above.  base64 uses
# '$' as its sign character because '-' is part of its digit alphabet.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
| bsd-3-clause |
redhat-openstack/trove | trove/tests/unittests/common/test_remote.py | 3 | 28702 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import uuid
from mock import patch, MagicMock
import swiftclient.client
from testtools import ExpectedException, matchers
from trove.common import cfg
from trove.common.context import TroveContext
from trove.common import exception
from trove.common import remote
from trove.tests.fakes.swift import SwiftClientStub
from trove.tests.unittests import trove_testtools
class TestRemote(trove_testtools.TestCase):
    """Exercise trove.common.remote's swift client and the SwiftClientStub fake."""
    def setUp(self):
        super(TestRemote, self).setUp()
    def tearDown(self):
        super(TestRemote, self).tearDown()
    # get_auth is patched out so a Connection can be built without keystone.
    @patch.object(swiftclient.client.Connection, 'get_auth')
    def test_creation(self, get_auth_mock):
        self.assertIsNotNone(swiftclient.client.Connection())
    # create_swift_client should resolve the 'object-store' endpoint from the
    # service catalog and return a usable Connection.
    def test_create_swift_client(self):
        mock_resp = MagicMock()
        with patch.object(swiftclient.client.Connection, 'get_container',
                          MagicMock(return_value=["text", mock_resp])):
            service_catalog = [{'endpoints': [{'region': 'RegionOne',
                                               'publicURL': 'example.com'}],
                                'type': 'object-store'}]
            client = remote.create_swift_client(TroveContext(
                tenant=uuid.uuid4().hex,
                service_catalog=service_catalog))
            headers, container = client.get_container('bob')
            self.assertIs(headers, "text")
            self.assertIs(container, mock_resp)
    def test_empty_account(self):
        """
        this is an account with no containers and no objects
        """
        # setup expectation
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('123223')
            # interact
            conn = swiftclient.client.Connection()
            account_info = conn.get_account()
            self.assertThat(account_info, matchers.Not(matchers.Is(None)))
            self.assertThat(len(account_info), matchers.Is(2))
            self.assertThat(account_info, matchers.IsInstance(tuple))
            self.assertThat(account_info[0], matchers.IsInstance(dict))
            self.assertThat(
                account_info[0],
                matchers.KeysEqual('content-length', 'accept-ranges',
                                   'x-timestamp', 'x-trans-id', 'date',
                                   'x-account-bytes-used',
                                   'x-account-container-count',
                                   'content-type',
                                   'x-account-object-count'))
            self.assertThat(account_info[1], matchers.IsInstance(list))
            self.assertThat(len(account_info[1]), matchers.Is(0))
    def test_one_container(self):
        """
        tests to ensure behavior is normal with one container
        """
        # setup expectation
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('123223')
            cont_name = 'a-container-name'
            swift_stub.with_container(cont_name)
            # interact
            conn = swiftclient.client.Connection()
            conn.get_auth()
            conn.put_container(cont_name)
            # get headers plus container metadata
            self.assertThat(len(conn.get_account()), matchers.Is(2))
            # verify container details
            account_containers = conn.get_account()[1]
            self.assertThat(len(account_containers), matchers.Is(1))
            self.assertThat(account_containers[0],
                            matchers.KeysEqual('count', 'bytes', 'name'))
            self.assertThat(account_containers[0]['name'],
                            matchers.Is(cont_name))
            # get container details
            cont_info = conn.get_container(cont_name)
            self.assertIsNotNone(cont_info)
            self.assertThat(
                cont_info[0],
                matchers.KeysEqual('content-length',
                                   'x-container-object-count', 'accept-ranges',
                                   'x-container-bytes-used', 'x-timestamp',
                                   'x-trans-id', 'date', 'content-type'))
            self.assertThat(len(cont_info[1]), matchers.Equals(0))
            # remove container
            swift_stub.without_container(cont_name)
            with ExpectedException(swiftclient.ClientException):
                conn.get_container(cont_name)
            # ensure there are no more containers in account
            self.assertThat(len(conn.get_account()[1]), matchers.Is(0))
    # One container holding one object: metadata, retrieval and deletion.
    def test_one_object(self):
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('123223')
            swift_stub.with_container('bob')
            swift_stub.with_object('bob', 'test', 'test_contents')
            # create connection
            conn = swiftclient.client.Connection()
            # test container lightly
            cont_info = conn.get_container('bob')
            self.assertIsNotNone(cont_info)
            self.assertThat(cont_info[0],
                            matchers.KeysEqual('content-length',
                                               'x-container-object-count',
                                               'accept-ranges',
                                               'x-container-bytes-used',
                                               'x-timestamp',
                                               'x-trans-id',
                                               'date',
                                               'content-type'))
            cont_objects = cont_info[1]
            self.assertThat(len(cont_objects), matchers.Equals(1))
            obj_1 = cont_objects[0]
            self.assertThat(obj_1, matchers.Equals(
                {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
                 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
                 'content_type': 'application/octet-stream',
                 'contents': 'test_contents'}))
            # test object api - not much to do here
            self.assertThat(conn.get_object('bob', 'test')[1],
                            matchers.Is('test_contents'))
            # test remove object
            swift_stub.without_object('bob', 'test')
            # interact
            with ExpectedException(swiftclient.ClientException):
                conn.delete_object('bob', 'test')
            self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(0))
    # Two containers / two objects: removal of one object then one container.
    def test_two_objects(self):
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('123223')
            swift_stub.with_container('bob')
            swift_stub.with_container('bob2')
            swift_stub.with_object('bob', 'test', 'test_contents')
            swift_stub.with_object('bob', 'test2', 'test_contents2')
            conn = swiftclient.client.Connection()
            self.assertIs(len(conn.get_account()), 2)
            cont_info = conn.get_container('bob')
            self.assertIsNotNone(cont_info)
            self.assertThat(cont_info[0],
                            matchers.KeysEqual('content-length',
                                               'x-container-object-count',
                                               'accept-ranges',
                                               'x-container-bytes-used',
                                               'x-timestamp',
                                               'x-trans-id',
                                               'date',
                                               'content-type'))
            self.assertThat(len(cont_info[1]), matchers.Equals(2))
            self.assertThat(cont_info[1][0], matchers.Equals(
                {'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
                 'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
                 'content_type': 'application/octet-stream',
                 'contents': 'test_contents'}))
            self.assertThat(conn.get_object('bob', 'test')[1],
                            matchers.Is('test_contents'))
            self.assertThat(conn.get_object('bob', 'test2')[1],
                            matchers.Is('test_contents2'))
            swift_stub.without_object('bob', 'test')
            with ExpectedException(swiftclient.ClientException):
                conn.delete_object('bob', 'test')
            self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(1))
            swift_stub.without_container('bob')
            with ExpectedException(swiftclient.ClientException):
                conn.get_container('bob')
            self.assertThat(len(conn.get_account()), matchers.Is(2))
    def test_nonexisting_container(self):
        """
        when a container does not exist and is accessed then a 404 is returned
        """
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('123223')
            swift_stub.with_container('existing')
            conn = swiftclient.client.Connection()
            with ExpectedException(swiftclient.ClientException):
                conn.get_container('nonexisting')
    def test_replace_object(self):
        """
        Test to ensure that if an object is updated the container object
        count is the same and the contents of the object are updated
        """
        with SwiftClientStub() as swift_stub:
            swift_stub.with_account('1223df2')
            swift_stub.with_container('new-container')
            swift_stub.with_object('new-container', 'new-object',
                                   'new-object-contents')
            conn = swiftclient.client.Connection()
            conn.put_object('new-container', 'new-object',
                            'new-object-contents')
            obj_resp = conn.get_object('new-container', 'new-object')
            self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
            self.assertThat(len(obj_resp), matchers.Is(2))
            self.assertThat(obj_resp[1], matchers.Is('new-object-contents'))
            # set expected behavior - trivial here since it is the intended
            # behavior however keep in mind this is just to support testing of
            # trove components
            swift_stub.with_object('new-container', 'new-object',
                                   'updated-object-contents')
            conn.put_object('new-container', 'new-object',
                            'updated-object-contents')
            obj_resp = conn.get_object('new-container', 'new-object')
            self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
            self.assertThat(len(obj_resp), matchers.Is(2))
            self.assertThat(obj_resp[1], matchers.Is(
                'updated-object-contents'))
            # ensure object count has not increased
            self.assertThat(len(conn.get_container('new-container')[1]),
                            matchers.Is(1))
class TestCreateCinderClient(trove_testtools.TestCase):
    """create_cinder_client endpoint selection: conf override vs. catalog lookup."""
    def setUp(self):
        super(TestCreateCinderClient, self).setUp()
        self.volumev2_public_url = 'http://publicURL/v2'
        self.volume_public_url_region_two = 'http://publicURL-r2/v1'
        # Catalog with two service types; the 'volume' entry spans two regions
        # so region selection can be exercised as well.
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': self.volumev2_public_url,
                    }
                ],
                'type': 'volumev2'
            },
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': 'http://publicURL-r1/v1',
                    },
                    {
                        'region': 'RegionTwo',
                        'publicURL': self.volume_public_url_region_two,
                    }
                ],
                'type': 'volume'
            }
        ]
    def tearDown(self):
        super(TestCreateCinderClient, self).tearDown()
        # Reset every option a test may have overridden so tests stay isolated.
        cfg.CONF.clear_override('cinder_url')
        cfg.CONF.clear_override('cinder_service_type')
        cfg.CONF.clear_override('os_region_name')
    def test_create_with_no_conf_no_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.create_cinder_client,
                          TroveContext())
    # A configured cinder_url wins over the catalog; tenant id is appended.
    def test_create_with_conf_override(self):
        cinder_url_from_conf = 'http://example.com'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('cinder_url', cinder_url_from_conf,
                              enforce_type=True)
        client = remote.create_cinder_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s/%s' % (cinder_url_from_conf, tenant_from_ctx),
                         client.client.management_url)
    def test_create_with_conf_override_trailing_slash(self):
        cinder_url_from_conf = 'http://example.com/'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('cinder_url', cinder_url_from_conf,
                              enforce_type=True)
        client = remote.create_cinder_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s%s' % (cinder_url_from_conf, tenant_from_ctx),
                         client.client.management_url)
    def test_create_with_catalog_and_default_service_type(self):
        client = remote.create_cinder_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.volumev2_public_url,
                         client.client.management_url)
    # Overriding both service type and region should pick the RegionTwo URL.
    def test_create_with_catalog_all_opts(self):
        cfg.CONF.set_override('cinder_service_type', 'volume',
                              enforce_type=True)
        cfg.CONF.set_override('os_region_name', 'RegionTwo',
                              enforce_type=True)
        client = remote.create_cinder_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.volume_public_url_region_two,
                         client.client.management_url)
class TestCreateNovaClient(trove_testtools.TestCase):
    """create_nova_client / create_admin_nova_client endpoint selection."""
    def setUp(self):
        super(TestCreateNovaClient, self).setUp()
        self.compute_public_url = 'http://publicURL/v2'
        self.computev3_public_url_region_two = 'http://publicURL-r2/v3'
        # Two compute service types; 'computev3' spans two regions so region
        # selection can be exercised as well.
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': self.compute_public_url,
                    }
                ],
                'type': 'compute'
            },
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': 'http://publicURL-r1/v1',
                    },
                    {
                        'region': 'RegionTwo',
                        'publicURL': self.computev3_public_url_region_two,
                    }
                ],
                'type': 'computev3'
            }
        ]
    def tearDown(self):
        super(TestCreateNovaClient, self).tearDown()
        # Reset every option a test may have overridden so tests stay isolated.
        cfg.CONF.clear_override('nova_compute_url')
        cfg.CONF.clear_override('nova_compute_service_type')
        cfg.CONF.clear_override('os_region_name')
    def test_create_with_no_conf_no_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.create_nova_client,
                          TroveContext())
    # A configured nova_compute_url wins over the catalog; tenant id appended.
    def test_create_with_conf_override(self):
        nova_url_from_conf = 'http://example.com'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('nova_compute_url', nova_url_from_conf,
                              enforce_type=True)
        client = remote.create_nova_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s/%s' % (nova_url_from_conf, tenant_from_ctx),
                         client.client.management_url)
    def test_create_with_conf_override_trailing_slash(self):
        nova_url_from_conf = 'http://example.com/'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('nova_compute_url', nova_url_from_conf,
                              enforce_type=True)
        client = remote.create_nova_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s%s' % (nova_url_from_conf, tenant_from_ctx),
                         client.client.management_url)
    def test_create_with_catalog_and_default_service_type(self):
        client = remote.create_nova_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.compute_public_url,
                         client.client.management_url)
    # Overriding both service type and region should pick the RegionTwo URL.
    def test_create_with_catalog_all_opts(self):
        cfg.CONF.set_override('nova_compute_service_type', 'computev3',
                              enforce_type=True)
        cfg.CONF.set_override('os_region_name', 'RegionTwo',
                              enforce_type=True)
        client = remote.create_nova_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.computev3_public_url_region_two,
                         client.client.management_url)
    # The admin client is built from context credentials plus the conf URL.
    def test_create_admin_client(self):
        nova_url_from_conf = 'http://adminexample.com/'
        cfg.CONF.set_override('nova_compute_url', nova_url_from_conf,
                              enforce_type=True)
        admin_user = 'admin1'
        admin_pass = 'adminpwd'
        admin_tenant_id = uuid.uuid4().hex
        admin_client = remote.create_admin_nova_client(
            TroveContext(user=admin_user,
                         auth_token=admin_pass,
                         tenant=admin_tenant_id))
        self.assertEqual(admin_user, admin_client.client.user)
        self.assertEqual(admin_pass, admin_client.client.password)
        self.assertEqual('%s%s' % (nova_url_from_conf, admin_tenant_id),
                         admin_client.client.management_url)
class TestCreateHeatClient(trove_testtools.TestCase):
    """create_heat_client endpoint selection: conf override vs. catalog lookup."""
    def setUp(self):
        super(TestCreateHeatClient, self).setUp()
        self.heat_public_url = 'http://publicURL/v2'
        self.heatv3_public_url_region_two = 'http://publicURL-r2/v3'
        # Two orchestration service types; 'orchestrationv3' spans two regions.
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': self.heat_public_url,
                    }
                ],
                'type': 'orchestration'
            },
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': 'http://publicURL-r1/v1',
                    },
                    {
                        'region': 'RegionTwo',
                        'publicURL': self.heatv3_public_url_region_two,
                    }
                ],
                'type': 'orchestrationv3'
            }
        ]
    def tearDown(self):
        super(TestCreateHeatClient, self).tearDown()
        # Reset every option a test may have overridden so tests stay isolated.
        cfg.CONF.clear_override('heat_url')
        cfg.CONF.clear_override('heat_service_type')
        cfg.CONF.clear_override('os_region_name')
    def test_create_with_no_conf_no_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.create_heat_client,
                          TroveContext())
    # A configured heat_url wins over the catalog; tenant id is appended.
    def test_create_with_conf_override(self):
        heat_url_from_conf = 'http://example.com'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('heat_url', heat_url_from_conf,
                              enforce_type=True)
        client = remote.create_heat_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s/%s' % (heat_url_from_conf, tenant_from_ctx),
                         client.http_client.endpoint)
    def test_create_with_conf_override_trailing_slash(self):
        heat_url_from_conf = 'http://example.com/'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('heat_url', heat_url_from_conf,
                              enforce_type=True)
        client = remote.create_heat_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s%s' % (heat_url_from_conf, tenant_from_ctx),
                         client.http_client.endpoint)
    def test_create_with_catalog_and_default_service_type(self):
        client = remote.create_heat_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.heat_public_url,
                         client.http_client.endpoint)
    # Overriding both service type and region should pick the RegionTwo URL.
    def test_create_with_catalog_all_opts(self):
        cfg.CONF.set_override('heat_service_type', 'orchestrationv3',
                              enforce_type=True)
        cfg.CONF.set_override('os_region_name', 'RegionTwo',
                              enforce_type=True)
        client = remote.create_heat_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.heatv3_public_url_region_two,
                         client.http_client.endpoint)
class TestCreateSwiftClient(trove_testtools.TestCase):
    """create_swift_client endpoint selection: conf override vs. catalog lookup."""
    def setUp(self):
        super(TestCreateSwiftClient, self).setUp()
        self.swift_public_url = 'http://publicURL/v2'
        self.swiftv3_public_url_region_two = 'http://publicURL-r2/v3'
        # Two object-store service types; 'object-storev3' spans two regions.
        self.service_catalog = [
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': self.swift_public_url,
                    }
                ],
                'type': 'object-store'
            },
            {
                'endpoints': [
                    {
                        'region': 'RegionOne',
                        'publicURL': 'http://publicURL-r1/v1',
                    },
                    {
                        'region': 'RegionTwo',
                        'publicURL': self.swiftv3_public_url_region_two,
                    }
                ],
                'type': 'object-storev3'
            }
        ]
    def tearDown(self):
        super(TestCreateSwiftClient, self).tearDown()
        # Reset every option a test may have overridden so tests stay isolated.
        cfg.CONF.clear_override('swift_url')
        cfg.CONF.clear_override('swift_service_type')
        cfg.CONF.clear_override('os_region_name')
    def test_create_with_no_conf_no_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.create_swift_client,
                          TroveContext())
    # A configured swift_url wins over the catalog; tenant id is appended
    # directly (the AUTH_ prefix is part of the configured URL).
    def test_create_with_conf_override(self):
        swift_url_from_conf = 'http://example.com/AUTH_'
        tenant_from_ctx = uuid.uuid4().hex
        cfg.CONF.set_override('swift_url', swift_url_from_conf,
                              enforce_type=True)
        client = remote.create_swift_client(
            TroveContext(tenant=tenant_from_ctx))
        self.assertEqual('%s%s' % (swift_url_from_conf, tenant_from_ctx),
                         client.url)
    def test_create_with_catalog_and_default_service_type(self):
        client = remote.create_swift_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.swift_public_url,
                         client.url)
    # Overriding both service type and region should pick the RegionTwo URL.
    def test_create_with_catalog_all_opts(self):
        cfg.CONF.set_override('swift_service_type', 'object-storev3',
                              enforce_type=True)
        cfg.CONF.set_override('os_region_name', 'RegionTwo',
                              enforce_type=True)
        client = remote.create_swift_client(
            TroveContext(service_catalog=self.service_catalog))
        self.assertEqual(self.swiftv3_public_url_region_two,
                         client.url)
class TestEndpoints(trove_testtools.TestCase):
    """
    Copied from glance/tests/unit/test_auth.py.
    """
    def setUp(self):
        super(TestEndpoints, self).setUp()
        # One object-store service with endpoints in two regions; each
        # endpoint exposes admin/internal/public URLs.
        self.service_catalog = [
            {
                'endpoint_links': [],
                'endpoints': [
                    {
                        'adminURL': 'http://localhost:8080/',
                        'region': 'RegionOne',
                        'internalURL': 'http://internalURL/',
                        'publicURL': 'http://publicURL/',
                    },
                    {
                        'adminURL': 'http://localhost:8081/',
                        'region': 'RegionTwo',
                        'internalURL': 'http://internalURL2/',
                        'publicURL': 'http://publicURL2/',
                    },
                ],
                'type': 'object-store',
                'name': 'Object Storage Service',
            }
        ]
    def test_get_endpoint_empty_catalog(self):
        self.assertRaises(exception.EmptyCatalog,
                          remote.get_endpoint,
                          None)
    # publicURL is the default endpoint type.
    def test_get_endpoint_with_custom_server_type(self):
        endpoint = remote.get_endpoint(self.service_catalog,
                                       service_type='object-store',
                                       endpoint_region='RegionOne')
        self.assertEqual('http://publicURL/', endpoint)
    def test_get_endpoint_with_custom_endpoint_type(self):
        endpoint = remote.get_endpoint(self.service_catalog,
                                       service_type='object-store',
                                       endpoint_type='internalURL',
                                       endpoint_region='RegionOne')
        self.assertEqual('http://internalURL/', endpoint)
    def test_get_endpoint_raises_with_invalid_service_type(self):
        self.assertRaises(exception.NoServiceEndpoint,
                          remote.get_endpoint,
                          self.service_catalog,
                          service_type='foo')
    def test_get_endpoint_raises_with_invalid_endpoint_type(self):
        self.assertRaises(exception.NoServiceEndpoint,
                          remote.get_endpoint,
                          self.service_catalog,
                          service_type='object-store',
                          endpoint_type='foo',
                          endpoint_region='RegionOne')
    def test_get_endpoint_raises_with_invalid_endpoint_region(self):
        self.assertRaises(exception.NoServiceEndpoint,
                          remote.get_endpoint,
                          self.service_catalog,
                          service_type='object-store',
                          endpoint_region='foo',
                          endpoint_type='internalURL')
    # Catalog entries without a 'type' key must be skipped, not crash lookup.
    def test_get_endpoint_ignores_missing_type(self):
        service_catalog = [
            {
                'name': 'Other Service',
            },
            {
                'endpoint_links': [],
                'endpoints': [
                    {
                        'adminURL': 'http://localhost:8080/',
                        'region': 'RegionOne',
                        'internalURL': 'http://internalURL/',
                        'publicURL': 'http://publicURL/',
                    },
                    {
                        'adminURL': 'http://localhost:8081/',
                        'region': 'RegionTwo',
                        'internalURL': 'http://internalURL2/',
                        'publicURL': 'http://publicURL2/',
                    },
                ],
                'type': 'object-store',
                'name': 'Object Storage Service',
            }
        ]
        endpoint = remote.get_endpoint(service_catalog,
                                       service_type='object-store',
                                       endpoint_region='RegionOne')
        self.assertEqual('http://publicURL/', endpoint)
| apache-2.0 |
skymanaditya1/numpy | numpy/distutils/command/build_py.py | 264 | 1210 | from __future__ import division, absolute_import, print_function
from distutils.command.build_py import build_py as old_build_py
from numpy.distutils.misc_util import is_string
class build_py(old_build_py):
    """numpy's build_py: also picks up *.py modules generated by build_src."""

    def run(self):
        # If the user configured no packages explicitly, build the ones that
        # build_src generated modules for.
        src_cmd = self.get_finalized_command('build_src')
        if src_cmd.py_modules_dict and self.packages is None:
            self.packages = list(src_cmd.py_modules_dict.keys())
        old_build_py.run(self)

    def find_package_modules(self, package, package_dir):
        """Distutils lookup extended with build_src generated *.py files."""
        found = old_build_py.find_package_modules(self, package, package_dir)
        src_cmd = self.get_finalized_command('build_src')
        found = found + src_cmd.py_modules_dict.get(package, [])
        return found

    def find_modules(self):
        """Run the distutils search over only the plain-string entries of
        self.py_modules, restoring the full list before returning."""
        saved_modules = self.py_modules[:]
        self.py_modules[:] = [entry for entry in saved_modules
                              if is_string(entry)]
        found = old_build_py.find_modules(self)
        self.py_modules[:] = saved_modules
        return found
# XXX: Fix find_source_files for item in py_modules such that item is 3-tuple
# and item[2] is source file.
| bsd-3-clause |
cloudfoundry-community/etherpad-lite-cf | bin/dirty-db-cleaner.py | 1 | 1520 | #!/usr/bin/env PYTHONUNBUFFERED=1 python
#
# Created by Bjarni R. Einarsson, placed in the public domain. Go wild!
#
import json
import os
import sys
# Validate the command line up front; any problem prints usage and exits.
try:
    dirtydb_input = sys.argv[1]
    dirtydb_output = '%s.new' % dirtydb_input
    # Explicit checks instead of assert: assertions are stripped under
    # ``python -O``, and the previous bare "except:" also swallowed
    # KeyboardInterrupt/SystemExit.
    if not os.path.exists(dirtydb_input):
        raise ValueError('input file not found: %s' % dirtydb_input)
    if os.path.exists(dirtydb_output):
        raise ValueError('output file already exists: %s' % dirtydb_output)
except (IndexError, ValueError):
    print()
    print('Usage: %s /path/to/dirty.db' % sys.argv[0])
    print()
    print('Note: Will create a file named dirty.db.new in the same folder,')
    print('      please make sure permissions are OK and a file by that')
    print('      name does not exist already. This script works by omitting')
    print('      duplicate lines from the dirty.db file, keeping only the')
    print('      last (latest) instance. No revision data should be lost,')
    print('      but be careful, make backups. If it breaks you get to keep')
    print('      both pieces!')
    print()
    sys.exit(1)
# Read every line of the dirty.db; later lines for the same 'key' overwrite
# earlier ones, so only the latest revision of each record is kept.
dirtydb = {}
lines = 0
with open(dirtydb_input, 'r') as fd:
    print('Reading %s' % dirtydb_input)
    for line in fd:
        lines += 1
        try:
            data = json.loads(line)
            dirtydb[data['key']] = line
        # ValueError: malformed JSON; KeyError: record without a 'key'.
        # (The previous bare "except:" also hid KeyboardInterrupt/SystemExit.)
        except (ValueError, KeyError):
            print("Skipping invalid JSON!")
        if lines % 10000 == 0:
            sys.stderr.write('.')  # progress dot every 10k lines
print()
print('OK, found %d unique keys in %d lines' % (len(dirtydb), lines))
# Write the surviving (latest) lines out to the .new file.
with open(dirtydb_output, 'w') as fd:
    for data in list(dirtydb.values()):
        fd.write(data)
print('Wrote data to %s. All done!' % dirtydb_output)
| apache-2.0 |
vuolter/pyload | src/pyload/plugins/downloaders/ZippyshareCom.py | 2 | 4066 | # -*- coding: utf-8 -*-
import re
import urllib.parse
from bs4 import BeautifulSoup
from pyload.core.utils.misc import eval_js
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ZippyshareCom(SimpleDownloader):
    __name__ = "ZippyshareCom"
    __type__ = "downloader"
    __version__ = "1.00"
    __status__ = "testing"
    __pattern__ = r"https?://(?P<HOST>www\d{0,3}\.zippyshare\.com)/(?:[vd]/|view\.jsp.*key=)(?P<KEY>[\w^_]+)"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Zippyshare.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("Walter Purcaro", "vuolter@gmail.com"),
        ("sebdelsol", "seb.morin@gmail.com"),
        ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
    ]
    # Normalize any matched URL to the canonical /v/<KEY>/file.html view page.
    COOKIES = [("zippyshare.com", "ziplocale", "en")]
    URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://\g<HOST>/v/\g<KEY>/file.html")]
    NAME_PATTERN = r'"/d/[\w^_]+/".*".*/(?P<N>[^/]+?)";'
    SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    OFFLINE_PATTERN = r"does not exist (anymore )?on this server<"
    TEMP_OFFLINE_PATTERN = r"^unmatchable$"
    LINK_PATTERN = r"document.location = '(.+?)'"
    def setup(self):
        # -1 chunk limit: let the base downloader pick; resumable multi-dl.
        self.chunk_limit = -1
        self.multi_dl = True
        self.resume_download = True
    def handle_free(self, pyfile):
        """Resolve the direct download link for a free (non-premium) download."""
        self.captcha = ReCaptcha(pyfile)
        captcha_key = self.captcha.detect_key()
        if captcha_key:
            try:
                # NOTE(review): re.search returns a Match object, not the URL
                # string — .group(1) looks intended here; confirm against what
                # SimpleDownloader expects self.link to be.
                self.link = re.search(self.LINK_PATTERN, self.data)
                self.captcha.challenge()
            except Exception as exc:
                self.error(exc)
        else:
            # No captcha: evaluate the page's JS to obtain the dlbutton href.
            self.link = self.fixurl(self.get_link())
        if ".com/pd/" in self.link:
            self.load(self.link)
            self.link = self.link.replace(".com/pd/", ".com/d/")
        if self.link and pyfile.name == "file.html":
            # Fall back to the last URL segment as the file name.
            pyfile.name = urllib.parse.unquote(self.link.split("/")[-1])
    def get_link(self):
        """Extract the download URL by emulating the page's obfuscation JS."""
        #: Get all the scripts inside the html body
        soup = BeautifulSoup(self.data, 'html.parser')
        scripts = [
            s.string
            for s in soup.body.find_all("script", type="text/javascript")
            if "('dlbutton').href =" in (s.string or "")
        ]
        #: Emulate a document in JS
        inits = [
            """
            var document = {}
            document.getElementById = function(x) {
                if (!this.hasOwnProperty(x)) {
                    this[x] = {getAttribute : function(x) { return this[x] } }
                }
                return this[x]
            }
            """
        ]
        #: inits is meant to be populated with the initialization of all the DOM elements found in the scripts
        eltRE = r'getElementById\([\'"](.+?)[\'"]\)(\.)?(getAttribute\([\'"])?(\w+)?([\'"]\))?'
        for m in re.findall(eltRE, " ".join(scripts)):
            JSid, JSattr = m[0], m[3]
            values = [
                f
                for f in (elt.get(JSattr, None) for elt in soup.find_all(id=JSid))
                if f
            ]
            if values:
                # Seed the fake DOM with the last value seen in the real HTML.
                inits.append(
                    'document.getElementById("{}")["{}"] = "{}"'.format(
                        JSid, JSattr, values[-1]
                    )
                )
        #: Add try/catch in JS to handle deliberate errors
        scripts = ["\n".join(("try{", script, "} catch(err){}")) for script in scripts]
        #: Get the file's url by evaluating all the scripts
        scripts = inits + scripts + ["document.dlbutton.href"]
        return eval_js("\n".join(scripts))
| agpl-3.0 |
zofuthan/airmozilla | airmozilla/comments/helpers.py | 13 | 1205 | import hashlib
import re
import jinja2
from jingo import register
from django.template.loader import render_to_string
from airmozilla.comments.models import Comment
@register.function
def recurse_comments(comment, discussion,
                     request, query_filter, can_manage_comments):
    """Render the reply sub-tree under ``comment`` as markup-safe HTML.

    Replies are restricted by ``query_filter`` and ordered oldest-first;
    the template recurses by calling this helper again for each reply
    (``root`` is False for every nested level).
    """
    comments = Comment.objects.filter(reply_to=comment)
    comments = comments.filter(query_filter)
    context = {
        'comments': comments.order_by('created'),
        'discussion': discussion,
        'request': request,
        'Comment': Comment,
        'can_manage_comments': can_manage_comments,
        'root': False,
        'query_filter': query_filter,
    }
    return jinja2.Markup(
        render_to_string('comments/comments.html', context)
    )
@register.function
def gravatar_src(email, secure, size=None):
    """Build a protocol-relative Gravatar URL for ``email``.

    Per the Gravatar API the address is lowercased and MD5-hashed; it is
    encoded to UTF-8 first because ``hashlib.md5`` requires bytes on
    Python 3 (and raises on non-ASCII unicode on Python 2).
    ``d=identicon`` requests a generated default avatar and ``size``
    (pixels) is appended when given.
    """
    if secure:
        tmpl = '//secure.gravatar.com/avatar/%s'
    else:
        tmpl = '//www.gravatar.com/avatar/%s'
    digest = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
    url = tmpl % digest
    url += '?d=identicon'
    if size is not None:
        url += '&s=%s' % size
    return url
@register.function
def obscure_email(email):
    """Mask the three word-characters on each side of '@' (e.g.
    'john@example.com' -> 'j...@...mple.com' stays partially hidden)."""
    # Raw string: '\w' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, and a SyntaxError in future Python versions).
    return re.sub(r'(\w{3})@(\w{3})', '...@...', email)
| bsd-3-clause |
roryk/bipy | bipy/toolbox/novoalign.py | 1 | 1472 | """ handling running novoalign """
from bipy.utils import (build_results_dir, flatten, which, replace_suffix,
flatten_options)
from bcbio.utils import safe_makedir
import os
import subprocess
import logging
logger = logging.getLogger(__name__)
def _build_output_file(input_file, novoalign_config, config):
    """Return the path of the SAM file to write for ``input_file``,
    creating the configured results directory if it does not exist."""
    outdir = build_results_dir(novoalign_config, config)
    safe_makedir(outdir)
    return os.path.join(outdir,
                        os.path.basename(replace_suffix(input_file, "sam")))
def _build_command(input_file, ref, novoalign_config):
    """Assemble the novoalign argv list: configured options plus SAM
    output, the reference index ``ref`` and the input reads file."""
    pieces = [which("novoalign"), flatten_options(novoalign_config),
              "-o", "SAM", "-d", ref, "-f", input_file]
    # Flatten nested option lists and stringify every token.
    return [str(piece) for piece in flatten(pieces)]
def run(input_file, ref, novoalign_config, config):
    """Align ``input_file`` against reference index ``ref`` with novoalign.

    Returns the path of the SAM output file.  The step is skipped when the
    output already exists, so re-runs are cheap and idempotent.
    """
    output_file = _build_output_file(input_file, novoalign_config, config)
    logger.info("Running novoalign on %s "
                "and outputting to %s." % (input_file, output_file))
    # skip if we already did this
    if os.path.exists(output_file):
        logger.info("Skipping %s, already complete." % (input_file))
        return output_file
    cmd = _build_command(input_file, ref, novoalign_config)
    # capture stdout from novoalign and redirect to the output file
    with open(output_file, "w") as out_handle:
        subprocess.check_call(cmd, stdout=out_handle)
    logger.info("Novoalign complete. Output in %s " % (output_file))
    return output_file
| mit |
samatt/OpenWPM | automation/SocketInterface.py | 2 | 5024 | import Queue
import threading
import socket
import struct
import json
#TODO - Implement a cleaner shutdown for server socket
# see: https://stackoverflow.com/questions/1148062/python-socket-accept-blocks-prevents-app-from-quitting
class serversocket:
"""
A server socket to recieve and process string messages
from client sockets to a central queue
"""
def __init__(self, verbose=False):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(('localhost', 0))
self.sock.listen(10) # queue a max of n connect requests
self.verbose = verbose
self.queue = Queue.Queue()
if self.verbose:
print "Server bound to: " + str(self.sock.getsockname())
def start_accepting(self):
""" Start the listener thread """
thread = threading.Thread(target=self._accept, args=())
thread.daemon = True # stops from blocking shutdown
thread.start()
def _accept(self):
""" Listen for connections and pass handling to a new thread """
while True:
(client, address) = self.sock.accept()
thread = threading.Thread(target=self._handle_conn, args=(client, address))
thread.daemon = True
thread.start()
def _handle_conn(self, client, address):
"""
Recieve messages and pass to queue. Messages are prefixed with
a 4-byte integer to specify the message length and 1-byte boolean
to indicate serialization with json.
"""
if self.verbose:
print "Thread: " + str(threading.current_thread()) + " connected to: " + str(address)
try:
while True:
msg = self.receive_msg(client, 5)
msglen, is_serialized = struct.unpack('>i?', msg)
if self.verbose:
print "Msglen: " + str(msglen) + " is_serialized: " + str(is_serialized)
msg = self.receive_msg(client, msglen)
if is_serialized:
try:
msg = json.loads(msg)
except UnicodeDecodeError:
try:
msg = json.loads(unicode(msg, 'ISO-8859-1', 'ignore'))
except ValueError:
if self.verbose:
"Unrecognized character encoding during de-serialization."
continue
except ValueError as e:
try:
msg = json.loads(unicode(msg, 'utf-8', 'ignore'))
except ValueError:
if self.verbose:
print "Unrecognized character encoding during de-serialization."
continue
if self.verbose:
print "Message:"
print msg
self.queue.put(msg)
except RuntimeError:
if self.verbose:
print "Client socket: " + str(address) + " closed"
def receive_msg(self, client, msglen):
msg = ''
while len(msg) < msglen:
chunk = client.recv(msglen-len(msg))
if chunk == '':
raise RuntimeError("socket connection broken")
msg = msg + chunk
return msg
    def close(self):
        # Close the listening socket; in-flight handler threads are daemons
        # and terminate with the process.
        self.sock.close()
class clientsocket:
    """Python 2 TCP client speaking serversocket's length-prefixed framing."""
    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def connect(self, host, port):
        # Plain TCP connect; framing is applied per message in send().
        self.sock.connect((host, port))
    def send(self, msg):
        """
        Sends an arbitrary python object to the connected socket. Serializes (json) if
        not str, and prepends msg len (4-bytes) and serialization status (1-byte).
        """
        #if input not string, serialize to string
        if type(msg) is not str:
            msg = json.dumps(msg)
            is_serialized = True
        else:
            is_serialized = False
        #prepend with message length
        # NOTE(review): Python 2 only -- struct.pack returns a byte string that
        # is concatenated with a str here. Also packs '>I?' (unsigned) while the
        # server unpacks '>i?' (signed); equivalent for realistic lengths.
        msg = struct.pack('>I?', len(msg), is_serialized) + msg
        totalsent = 0
        # Loop until the whole frame is written; send() may write partially.
        while totalsent < len(msg):
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent
    def close(self):
        self.sock.close()
if __name__ == '__main__':
    import sys
    #Just for testing
    # Manual smoke test (Python 2: raw_input / print statements):
    #   's' runs a verbose echo-to-queue server, 'c' an interactive client.
    if sys.argv[1] == 's':
        sock = serversocket(verbose=True)
        sock.start_accepting()
        raw_input("Press enter to exit...")
        sock.close()
    elif sys.argv[1] == 'c':
        sock = clientsocket()
        host = raw_input("Enter the host name:\n")
        port = raw_input("Enter the port:\n")
        sock.connect(host, int(port))
        msg = None
        # Note: the terminating "quit" message is itself sent before exit.
        while msg != "quit":
            msg = raw_input("Enter a message to send:\n")
            sock.send(msg)
        sock.close()
| gpl-3.0 |
kanghj/dinner-tables-planner | tables/solve.py | 1 | 10927 | import csv
import uuid
from collections import defaultdict
import subprocess
import os
import re
import tempfile
import boto3
import json
import math
from werkzeug.utils import secure_filename
from botocore.exceptions import ClientError
from openpyxl import load_workbook
from .coarser import coarse_local
s3 = boto3.client('s3')
def represent_in_asp(coarse_to_original, new_community,
                     new_table_sz, persons, presolved,
                     clique_weights, table_size):
    """Translate the coarsened seating problem into ground ASP facts.

    Returns (facts, persons, coarse_to_original), where ``facts`` is an
    ordered list of atoms (each terminated by '.') consumed by the clingo
    encoding.
    """
    facts = []

    def add_unique(atom):
        # Several loops below can emit duplicate atoms; keep the first only.
        if atom not in facts:
            facts.append(atom)

    for node, originals in coarse_to_original.items():
        facts.append('person({}).'.format(node))
        # A node already pinned to a table by presolving counts as size 1.
        pinned = any(entry[0] == 'in_table' and entry[1] == node
                     for entry in presolved)
        node_size = 1 if pinned else len(originals)
        facts.append('person_size({}, {}).'.format(node, node_size))

    total_people = sum(len(members) for members in coarse_to_original.values())
    by_capacity = math.ceil(total_people / table_size)
    facts.append('total_tables_min({}).'.format(
        min(by_capacity, len(new_table_sz))))
    facts.append('total_tables_max({}).'.format(
        max(by_capacity, len(new_table_sz))))

    facts.append('cliques({}).'.format(len(new_community)))
    clique_order = list(new_community)

    for table_num, capacity in enumerate(new_table_sz):
        facts.append('table_size({}, {}).'.format(table_num, capacity))

    for clique_name, members in new_community.items():
        clique_id = clique_order.index(clique_name) + 1
        # Missing weight entries default to 1.
        weight = clique_weights.get(clique_name, 1)
        add_unique('clique_weight({}, {}).'.format(clique_id, weight))
        for member in members:
            add_unique('in_clique({}, {}).'.format(member, clique_id))

    for entry in presolved:
        if len(entry) > 2:
            add_unique('{}({},{}).'.format(entry[0], entry[1], entry[2]))
        else:
            add_unique('{}({}).'.format(entry[0], entry[1]))

    return facts, persons, coarse_to_original
def community_and_persons_from_file(path, filetype):
    """Parse a guest file (.csv or .xlsx) into clique membership data.

    Each column is one clique: the header cell is the clique name and the
    cells below it are guest names. Returns (community, persons,
    clique_names) where ``community`` maps 1-based clique number to a list
    of 1-based person numbers, and ``persons`` is the ordered list of
    unique guest names.

    NOTE(review): a filetype matching neither branch leaves clique_names
    unbound and raises NameError on return.
    """
    persons = []
    community = defaultdict(list)
    if filetype.endswith('csv'):
        with open(path, 'r') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',')
            clique_names = []
            for row in reader:
                for key, value in row.items():
                    if len(key.strip()) == 0:
                        continue  # unnamed column
                    if key not in clique_names:
                        clique_names.append(key)
                    if len(value) == 0:
                        continue  # empty cell
                    if value not in persons:
                        persons.append(value)
                    community[clique_names.index(key) + 1].append(
                        persons.index(value) + 1)
    elif filetype.endswith('xlsx'):
        wb = load_workbook(path)
        # NOTE(review): get_active_sheet() is deprecated in openpyxl;
        # wb.active is the modern spelling -- confirm before upgrading.
        sheet = wb.get_active_sheet()
        clique_names = []
        for i, column in enumerate(sheet.columns):
            col_name = None
            for j, cell in enumerate(column):
                if j == 0:
                    # First row holds the clique (column) name.
                    col_name = cell.value
                    if col_name is None or \
                            (isinstance(col_name, str) and
                             len(col_name.strip()) == 0):
                        break # skip empty columns
                    clique_names.append(col_name)
                    continue
                if cell.value is None or \
                        (isinstance(cell.value, str) and
                         len(cell.value) == 0):
                    continue
                if cell.value not in persons:
                    persons.append(cell.value)
                community[clique_names.index(col_name) + 1].append(
                    persons.index(cell.value) + 1)
    return community, persons, clique_names
def parse_clingo_out(output):
    """Extract the last answer set's in_table(P,T) atoms.

    ``output`` may be clingo's stdout as a single string or a list of
    lines. Returns a defaultdict mapping table number (str) to the list
    of coarse-node ids (str) seated at it.
    """
    if isinstance(output, list):
        text = '\n'.join(output)
    else:
        text = output
    # Only the final (best) answer set reported by clingo matters.
    final_answer = text[text.rfind('Answer:'):]
    assignments = re.findall(r'in_table\((\d+),(\d+)\)', final_answer)
    tables = defaultdict(list)
    for person, table in assignments:
        tables[table].append(person)
    return tables
def save_file(tmpdir_path, file, filename, job_id):
    """Persist an uploaded file under tmpdir_path, prefixed with the job id.

    Returns the full path the file was saved to.
    """
    destination = os.path.join(tmpdir_path, '{}_{}'.format(job_id, filename))
    file.save(destination)
    return destination
def write_facts_to_file(facts, job_id):
    """Write one ASP fact per line to facts_<job_id>.lp (in the CWD).

    Returns the file name so the caller can pass it to clingo and later
    remove it.
    """
    target = 'facts_{}.lp'.format(job_id)
    with open(target, 'w+') as handle:
        handle.writelines(fact + '\n' for fact in facts)
    return target
def get_clingo_output(facts_file):
    """Run the bundled clingo binary on facts_file plus clingo/enc.lp.

    Solver flags: 8 threads, 25-second time limit, statistics enabled.
    Returns clingo's stdout as a list of decoded text lines (each line
    keeps its trailing newline).
    """
    proc = subprocess.Popen(['exec/clingo', '-t 8',
                             '--time-limit=25', '--stat',
                             facts_file, 'clingo/enc.lp'],
                            stdout=subprocess.PIPE)
    resp_text = []
    # Stream stdout as it is produced rather than waiting for exit.
    for line in proc.stdout:
        resp_text.append(line.decode('utf-8'))
    return resp_text
def solve_by_clingo(facts, job_id):
    """Run clingo over the given facts and return its stdout lines.

    The temporary facts file is removed after a successful solve.
    """
    path = write_facts_to_file(facts, job_id)
    output = get_clingo_output(path)
    os.remove(path)
    return output
def partition_from_file(table_size, csv_file):
    """Parse an uploaded guest-list file and solve a seating partition.

    Bug fixes: community_and_persons_from_file() requires the file type and
    returns three values (the original unpacked two and passed one arg),
    and partition() requires a clique-weight mapping.

    :param table_size: maximum number of seats per table
    :param csv_file: uploaded file object exposing .filename and .save()
    :return: (tables, persons, is_final) as produced by partition()
    """
    job_id = str(uuid.uuid4())
    with tempfile.TemporaryDirectory() as tmpdir:
        filename = secure_filename(csv_file.filename)
        path = save_file(tmpdir, csv_file, filename, job_id)
        # The parser dispatches on the file extension ('csv' vs 'xlsx').
        _, file_extension = os.path.splitext(filename)
        community, persons, _clique_names = community_and_persons_from_file(
            path, file_extension)
        # No per-clique weights on this path; represent_in_asp treats a
        # missing entry as weight 1, so an empty mapping is safe.
        return partition(community, job_id, persons, table_size, {})
def add_solving_atoms(facts):
    """Return a copy of facts with the clingo encoding rules appended.

    NOTE(review): encoding lines keep their trailing newline while the
    fact strings do not; the consumer joins everything with '\\n', so rule
    lines end up blank-line separated -- harmless to clingo.
    """
    new_facts = facts.copy()
    with open('clingo/enc.lp') as enc_file:
        for line in enc_file:
            new_facts.append(line)
    return new_facts
def create_staging_file_and_upload_to_s3(table_size, uploaded_file):
    """Parse an uploaded guest file and stage the problem data on S3.

    Stores (community, persons, clique_names, table_size) as JSON under
    pickles/<job_id> in the dining-tables-chart bucket so a later call to
    create_file_and_upload_to_s3 can attach clique weights and solve.
    Returns (job_id, community, persons, clique_names).
    """
    job_id = str(uuid.uuid4())
    with tempfile.TemporaryDirectory() as tmpdir:
        # Only the extension is needed from splitext; the safe name is
        # recomputed via secure_filename below.
        filename, file_extension = os.path.splitext(uploaded_file.filename)
        path = save_file(tmpdir, uploaded_file,
                         secure_filename(uploaded_file.filename), job_id)
        community, persons, clique_names = community_and_persons_from_file(
            path, file_extension)
    s3.put_object(
        Bucket='dining-tables-chart',
        Key='pickles/{}'.format(job_id),
        Body=json.dumps((community, persons, clique_names, table_size)))
    return job_id, community, persons, clique_names
def create_file_and_upload_to_s3(job_id, clique_weights_raw):
    """Build the ASP program for a staged job and upload it for solving.

    Loads the staged problem from S3, applies user-supplied clique weights
    (keyed by clique name), coarsens the graph, then uploads both the .lp
    program (for the solver) and the metadata needed to decode its answer.
    Returns the job_id unchanged.
    """
    community, persons, clique_names, table_size = json.loads(
        s3.get_object(
            Bucket='dining-tables-chart',
            Key='pickles/{}'.format(job_id))['Body'].read().decode('utf-8'))
    # convert keys of clique_weights to use the clique numbers instead
    clique_weights = {str(clique_names.index(key) + 1): int(values)
                      for key, values in clique_weights_raw.items()}
    new_table_sz, new_community, coarse_to_original, presolved = \
        coarse_local(community, table_size, clique_weights)
    facts, persons, coarse_nodes_to_persons = represent_in_asp(
        coarse_to_original, new_community, new_table_sz,
        persons, presolved, clique_weights, table_size)
    facts = add_solving_atoms(facts)
    s3.put_object(Bucket='dining-tables-chart',
                  Key='lp/{}.lp'.format(job_id),
                  Body='\n'.join(facts))
    s3.put_object(
        Bucket='dining-tables-solved',
        Key='pickles/{}'.format(job_id),
        Body=json.dumps((persons, coarse_nodes_to_persons,
                         clique_names, new_community)))
    return job_id
def get_tables_from_clingo_out(resp_text, coarse_nodes_to_persons):
    """Expand clingo's coarse-node table assignment back to original guests.

    Returns (tables, is_final) where ``tables`` maps table number to the
    list of original person ids and ``is_final`` reports whether the solver
    output is terminal.
    """
    coarse_assignment = parse_clingo_out(resp_text)
    expanded = {}
    for table_id, coarse_nodes in coarse_assignment.items():
        guests = []
        for coarse_node in coarse_nodes:
            # The mapping may be keyed by int or str depending on whether it
            # went through a JSON round trip.
            try:
                guests.extend(coarse_nodes_to_persons[int(coarse_node)])
            except KeyError:
                guests.extend(coarse_nodes_to_persons[coarse_node])
        expanded[table_id] = guests
    return expanded, is_final_text(resp_text)
def is_final_text(resp_text):
    """Return True when clingo output (str or list of lines) is terminal."""
    if isinstance(resp_text, str):
        joined = resp_text
    else:
        joined = ' '.join(resp_text)
    markers = ('SATISFIABLE', 'UNSATISFIABLE', 'OPTIMUM FOUND')
    return any(marker in joined for marker in markers)
def partition(community, job_id, persons, table_size, clique_weights):
    """Coarsen, encode, solve and decode a seating assignment synchronously."""
    coarse_sizes, coarse_community, coarse_to_original, presolved = \
        coarse_local(community, table_size, clique_weights)
    facts, persons, node_map = represent_in_asp(
        coarse_to_original, coarse_community, coarse_sizes,
        persons, presolved, clique_weights, table_size)
    clingo_output = solve_by_clingo(facts, job_id)
    tables, is_final = get_tables_from_clingo_out(clingo_output, node_map)
    return tables, persons, is_final
def ans_from_s3_ans_bucket(job_id):
    """Fetch a finished (or partial) clingo answer for job_id from S3.

    Returns six Nones when the answer object does not yet exist; any other
    S3 failure is re-raised instead of being silently swallowed.

    :return: (tables, persons, coarse_to_original, new_community,
              clique_names, is_final)
    """
    try:
        readfile = s3.get_object(
            Bucket='dining-tables-solved',
            Key='{}.lp.ans'.format(job_id))['Body'].read().decode('utf-8')
    except ClientError as ex:
        if ex.response['Error']['Code'] == 'NoSuchKey':
            return None, None, None, None, None, None
        # BUG FIX: previously any other ClientError fell through with
        # `readfile` unbound, surfacing as a confusing NameError.
        raise
    persons, coarse_to_original, clique_names, new_community = json.loads(
        s3.get_object(
            Bucket='dining-tables-solved',
            Key='pickles/{}'.format(job_id))['Body'].read().decode('utf-8'))
    tables, is_final = get_tables_from_clingo_out(readfile, coarse_to_original)
    return tables, \
        persons, coarse_to_original, new_community, clique_names, is_final
def delete_job(job_id):
    """
    Deletes s3 files relating to job_id.
    TODO Should kill any Batch program too
    :param job_id: identifier returned by create_staging_file_and_upload_to_s3
    :return: None
    """
    # Remove the solver answer, the decode metadata and the generated program.
    s3.delete_object(Bucket='dining-tables-solved',
                     Key='{}.lp.ans'.format(job_id))
    s3.delete_object(Bucket='dining-tables-solved',
                     Key='pickles/{}'.format(job_id),)
    s3.delete_object(Bucket='dining-tables-chart',
                     Key='lp/{}.lp'.format(job_id))
| mit |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/sgmllib.py | 306 | 17884 | """A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.__starttag_text = None   # raw text of the most recent start tag
        self.rawdata = ''             # buffered, not-yet-parsed input
        self.stack = []               # currently open (unclosed) tags
        self.lasttag = '???'          # last start tag seen, for '<>' shorthand
        self.nomoretags = 0           # 1 => treat all remaining input as data
        self.literal = 0              # 1 => CDATA mode (only end tags special)
        markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
    # Internal -- parse processing instr, return length or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<?':
            self.error('unexpected call to parse_pi()')
        match = piclose.search(rawdata, i+2)
        if not match:
            return -1   # closing '>' not buffered yet; wait for more data
        j = match.start(0)
        # Everything between '<?' and the closing '>' is the PI payload.
        self.handle_pi(rawdata[i+2: j])
        j = match.end(0)
        return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
    # Internal -- convert entity or character reference
    def _convert_ref(self, match):
        # re.sub callback for entity_or_charref on attribute values:
        # group(1) is an entity name, group(2) a numeric charref,
        # group(3) the optional terminating ';'.
        if match.group(2):
            return self.convert_charref(match.group(2)) or \
                '&#%s%s' % match.groups()[1:]
        elif match.group(3):
            return self.convert_entityref(match.group(1)) or \
                '&%s;' % match.group(1)
        else:
            # No terminating ';' -- leave the ambiguous reference untouched.
            return '&%s' % match.group(1)
    # Internal -- parse endtag
    def parse_endtag(self, i):
        rawdata = self.rawdata
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1   # tag not closed yet; need more data
        j = match.start(0)
        # Tag name is whatever sits between '</' and the bracket.
        tag = rawdata[i+2:j].strip().lower()
        if rawdata[j] == '>':
            j = j+1
        self.finish_endtag(tag)
        return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
    # Internal -- finish processing of end tag
    def finish_endtag(self, tag):
        if not tag:
            # Empty end tag '</>' closes the most recently opened element.
            found = len(self.stack) - 1
            if found < 0:
                self.unknown_endtag(tag)
                return
        else:
            if tag not in self.stack:
                # Unbalanced close: report it, unless the subclass defines no
                # end_<tag> handler at all (then treat as unknown instead).
                try:
                    method = getattr(self, 'end_' + tag)
                except AttributeError:
                    self.unknown_endtag(tag)
                else:
                    self.report_unbalanced(tag)
                return
            found = len(self.stack)
            # Locate the last (innermost) occurrence of tag on the stack.
            for i in range(found):
                if self.stack[i] == tag: found = i
        # Implicitly close every element opened at or after index 'found'.
        while len(self.stack) > found:
            tag = self.stack[-1]
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                method = None
            if method:
                self.handle_endtag(tag, method)
            else:
                self.unknown_endtag(tag)
            del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
    def convert_charref(self, name):
        """Convert character reference, may be overridden."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127:
            return   # only ASCII codepoints are converted by default
        return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
As an alternative to overriding this method; one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| apache-2.0 |
bhargav/scikit-learn | sklearn/ensemble/tests/test_forest.py | 26 | 41675 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf))
    # Repeat with max_features=1 to exercise the restricted-split code path.
    clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf))
    # also test apply
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    """Yield one toy-classification check per forest classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, classifier_name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
                   % (criterion, score))
    # With max_features=2 the training-accuracy bar is deliberately lower.
    clf = ForestClassifier(n_estimators=10, criterion=criterion,
                           max_features=2, random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
                   % (criterion, score))
def test_iris():
    """Run the iris consistency check for every classifier/criterion pair."""
    for classifier_name, split_criterion in product(FOREST_CLASSIFIERS,
                                                    ("gini", "entropy")):
        yield check_iris_criterion, classifier_name, split_criterion
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    ForestRegressor = FOREST_REGRESSORS[name]
    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
                                "and score = %f" % (criterion, score))
    # Repeat with a restricted feature subset per split.
    clf = ForestRegressor(n_estimators=5, criterion=criterion,
                          max_features=6, random_state=1)
    clf.fit(boston.data, boston.target)
    score = clf.score(boston.data, boston.target)
    assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
                                "and score = %f" % (criterion, score))
def test_boston():
    """Run the boston regression check for each regressor ("mse" only)."""
    for regressor_name, split_criterion in product(FOREST_REGRESSORS,
                                                   ("mse", )):
        yield check_boston_criterion, regressor_name, split_criterion
def check_regressor_attributes(name):
    # Regression models should not have a classes_ attribute.
    r = FOREST_REGRESSORS[name](random_state=0)
    # Neither before fitting...
    assert_false(hasattr(r, "classes_"))
    assert_false(hasattr(r, "n_classes_"))
    r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    # ...nor after.
    assert_false(hasattr(r, "classes_"))
    assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
    """Regressors must not grow classification-only attributes."""
    for regressor_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, regressor_name
def check_probability(name):
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
                               max_depth=1)
        clf.fit(iris.data, iris.target)
        # Class probabilities must sum to one per sample...
        assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
                                  np.ones(iris.data.shape[0]))
        # ...and log-probabilities must be consistent with probabilities.
        assert_array_almost_equal(clf.predict_proba(iris.data),
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    """Probability outputs are checked for every forest classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_probability, classifier_name
def check_importances(name, criterion, X, y):
    """Check feature importances: shape, informative-feature count,
    n_jobs consistency, and invariance to global sample-weight scaling."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)
    # XXX: Remove this test in 0.19 after transform support to estimators
    # is removed.
    X_new = assert_warns(
        DeprecationWarning, est.transform, X, threshold="mean")
    # BUG FIX: the original `assert_less(0 < X_new.shape[1], X.shape[1])`
    # compared a boolean (True == 1) to an int and could never fail as
    # intended; assert the two bounds separately.
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))
    # Importances should be invariant to a global rescaling of the weights.
    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
    # 10 features, exactly 3 informative: check_importances asserts this
    # exact shape and informative-feature count.
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
        yield check_importances, name, criterion, X, y
    for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
        yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).

    def binomial(k, n):
        # Binomial coefficient C(n, k); 0 outside the valid 0 <= k <= n range.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        # Shannon entropy (base 2) of an array of discrete labels.
        n_samples = len(samples)
        entropy = 0.
        for count in bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy

    def mdi_importance(X_m, X, y):
        # Theoretical mean decrease of impurity of feature X_m, averaged over
        # all conditioning subsets B (Louppe et al, 2013).
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    # FIX: np.bool is a deprecated alias removed in
                    # NumPy >= 1.24; the builtin bool is equivalent.
                    mask_b = np.ones(n_samples, dtype=bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))
        return imp

    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])
    # FIX: same np.bool -> bool replacement as above.
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]

    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)

    # Estimate importances with totally randomized trees
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)
    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators

    # Check correctness: importances of totally randomized trees sum to the
    # total entropy of the output, and match the theoretical values.
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    """Accessing feature_importances_ before fit must raise ValueError."""
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    """One unfitted-importances check per registered forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, estimator_name
def check_oob_score(name, X, y, n_estimators=20):
    """The OOB score should be a good estimate of the held-out score."""
    estimator = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                        n_estimators=n_estimators,
                                        bootstrap=True)
    half = X.shape[0] // 2
    estimator.fit(X[:half, :], y[:half])
    held_out_score = estimator.score(X[half:, :], y[half:])
    if name in FOREST_CLASSIFIERS:
        assert_less(abs(held_out_score - estimator.oob_score_), 0.1)
    else:
        assert_greater(held_out_score, estimator.oob_score_)
        assert_greater(estimator.oob_score_, .8)
    # A single estimator leaves some samples without OOB predictions and must
    # warn; silence the resulting divide-by-zero / invalid-value noise.
    with np.errstate(divide="ignore", invalid="ignore"):
        estimator = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                            n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, estimator.fit, X, y)
def test_oob_score():
    """OOB checks on dense/sparse inputs for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # csc matrix
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous targets in classification
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1
    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # csc matrix
        yield check_oob_score, reg_name, csc_matrix(boston.data), \
            boston.target, 50
def check_oob_score_raise_error(name):
    # Check error behaviour around oob_score for estimator `name`.
    # NOTE(review): relies on module-level X, y fixtures.
    ForestEstimator = FOREST_ESTIMATORS[name]
    if name in FOREST_TRANSFORMERS:
        # Transformers do not accept an oob_score constructor argument at all,
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        # and their internal _set_oob_score hook is not implemented.
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score: attribute must not exist yet.
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            assert_false(hasattr(est, "oob_score_"))
        # No bootstrap: oob_score=True without bootstrap is invalid at fit.
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    """Run the oob_score error checks for every estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, estimator_name
def check_gridsearch(name):
    """Smoke-test GridSearchCV over a small forest parameter grid."""
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    """Check that base trees can be grid-searched."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Predictions must not depend on the number of parallel jobs."""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3,
                                     random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)
    forest.set_params(n_jobs=1)
    serial_pred = forest.predict(X)
    forest.set_params(n_jobs=2)
    parallel_pred = forest.predict(X)
    assert_array_almost_equal(serial_pred, parallel_pred, 3)
def test_parallel():
    """Parallelism checks: classifiers on iris, regressors on boston."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    """A pickled forest must round-trip with class and score intact."""
    original = FOREST_ESTIMATORS[name](random_state=0)
    original.fit(X, y)
    score_before = original.score(X, y)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_equal(score_before, restored.score(X, y))
def test_pickle():
    """Pickle round-trip checks on subsampled iris/boston."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    # Check estimators on multi-output problems.
    # X lives in four quadrants; y pairs a sign label (output 0, 2 classes)
    # with a quadrant label (output 1, 4 classes).
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        # predict_proba / predict_log_proba return one array per output:
        # (n_test, 2) for the sign output, (n_test, 4) for the quadrant one.
        with np.errstate(divide="ignore"):
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))
            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    """Multi-output checks for every classifier and regressor."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_multioutput, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_multioutput, reg_name
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    # NOTE(review): relies on module-level X, y fixtures with labels {-1, 1}.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Classification, single output
    clf = ForestClassifier(random_state=0).fit(X, y)
    assert_equal(clf.n_classes_, 2)
    assert_array_equal(clf.classes_, [-1, 1])
    # Classification, multi-output: second output doubles the labels,
    # so each output contributes its own class set.
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, _y)
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    """classes_ / n_classes_ shape checks for each classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    """RandomTreesEmbedding(sparse_output=False) must return an ndarray."""
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = hasher.fit_transform(X)
    # Exact type check: a dense ndarray, not a scipy sparse matrix.
    assert_equal(type(transformed), np.ndarray)
def test_random_trees_dense_equal():
    """Dense and sparse RandomTreesEmbedding outputs must coincide."""
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    # Same random_state, same data: the two representations must agree.
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform: separate fit().transform() must equal
    # fit_transform() for the same random_state.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())
    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # The embedding should make the two circles linearly separable even
    # after reduction to two SVD components.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    """Hashing sparse input must match hashing the dense equivalent."""
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    """predict_proba must be independent of n_jobs used during fit."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    # Train identical forests (same seed) with increasing parallelism.
    classifiers = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        clf = RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                                     random_state=12345)
        classifiers.append(clf.fit(X_train, y_train))
    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in classifiers]
    # Every adjacent pair of predictions must agree.
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Check the empirical distribution of totally random tree structures.

    Trees grown by ExtraTreesRegressor on small categorical inputs should hit
    every possible structure with approximately its theoretical probability.
    """
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # Serialize each tree as a "feature,threshold/" string ("-" for a
        # leaf) so structurally identical trees collapse onto the same key.
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # FIX: draw from the seeded `rng`, not the global np.random module, so
    # the test is deterministic regardless of outside use of the global RNG.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = [(count, tree) for tree, count in uniques.items()]
    # All 8 possible tree structures over the (2-valued, 3-valued) pair
    # should appear among 100 trees.
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
    """max_leaf_nodes must take precedence over max_depth."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # With max_leaf_nodes set, the depth-1 cap is ignored.
    with_leaf_cap = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                                    n_estimators=1, random_state=0).fit(X, y)
    assert_greater(with_leaf_cap.estimators_[0].tree_.max_depth, 1)
    # Without max_leaf_nodes, max_depth is honoured.
    depth_only = ForestEstimator(max_depth=1, n_estimators=1,
                                 random_state=0).fit(X, y)
    assert_equal(depth_only.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    """Precedence check for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, estimator_name
def check_min_samples_split(name):
    # Internal nodes must hold at least min_samples_split samples, for both
    # the integer (absolute) and float (fraction of n_samples) forms.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test boundary value: negative, zero and > 1.0 floats are invalid.
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=0).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_split=1.1).fit, X, y)
    est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
    est.fit(X, y)
    # Internal (non-leaf) nodes are those with a left child.
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
    # NOTE(review): the threshold len(X) * 0.5 - 1 presumably equals
    # min_samples_split - 1 for the hastie fixture size — confirm against
    # the module-level hastie_X definition.
    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))
    # Float form: 0.5 means half of the training samples.
    est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
    est.fit(X, y)
    node_idx = est.estimators_[0].tree_.children_left != -1
    node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
    assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_split():
    """min_samples_split validation for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_samples_split, estimator_name
def check_min_samples_leaf(name):
    """Every leaf must contain at least min_samples_leaf training samples.

    Exercises the integer form (absolute count), the float form (fraction of
    the training set), and rejection of out-of-range values.
    """
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test boundary value
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=-1).fit, X, y)
    assert_raises(ValueError,
                  ForestEstimator(min_samples_leaf=0).fit, X, y)
    est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = bincount(out)
    # drop inner nodes (apply() only returns leaf ids, so zero-count
    # entries correspond to internal nodes)
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), 4,
                   "Failed with {0}".format(name))
    est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                          random_state=0)
    est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    # CONSISTENCY FIX: use the module-level `bincount` helper, as above and
    # in the other checks in this file, instead of np.bincount.
    node_counts = bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_leaf():
    """min_samples_leaf validation for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, estimator_name
def check_min_weight_fraction_leaf(name):
    X, y = hastie_X, hastie_y
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    # NOTE(review): max_leaf_nodes is not actually set anywhere below —
    # the comment above looks stale; confirm intent.
    for frac in np.linspace(0, 0.5, 6):
        est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
                              random_state=0)
        if "RandomForest" in name:
            # Disable bootstrap so every sample (and its weight) reaches the
            # single tree; resampling would change the per-leaf weights.
            est.bootstrap = False
        est.fit(X, y, sample_weight=weights)
        out = est.estimators_[0].tree_.apply(X)
        # Total sample weight routed to each node id.
        node_weights = bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """min_weight_fraction_leaf checks for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, estimator_name
def check_sparse_input(name, X, X_sparse, y):
    # Fitting on sparse input must give the same model as fitting on the
    # equivalent dense input.
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
    # Same leaf assignments implies structurally identical trees.
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
    if name in FOREST_TRANSFORMERS:
        # Transformers return sparse embeddings; compare densified output.
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    """Sparse-vs-dense checks for each estimator and sparse format."""
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)
    for estimator_name, to_sparse in product(FOREST_ESTIMATORS,
                                             (csr_matrix, csc_matrix,
                                              coo_matrix)):
        yield check_sparse_input, estimator_name, X, to_sparse(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Nothing (default layout)
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Sparse layouts only apply when the underlying splitter supports them.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
    # Strided (non-contiguous view via step slicing)
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    """Memory-layout checks across estimators and both float dtypes."""
    for registry in (FOREST_CLASSIFIERS, FOREST_REGRESSORS):
        for estimator_name, dtype in product(registry,
                                             [np.float64, np.float32]):
            yield check_memory_layout, estimator_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    """1d feature arrays are rejected at both fit and predict time."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    one_d_fitter = ForestEstimator(n_estimators=1, random_state=0)
    assert_raises(ValueError, one_d_fitter.fit, X, y)
    fitted = ForestEstimator(random_state=0).fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, fitted.predict, X)
@ignore_warnings
def test_1d_input():
    """Run the 1d-input checks with a single iris feature column."""
    feature = iris.data[:, 0]
    feature_2d = iris.data[:, 0].reshape((-1, 1))
    labels = iris.target
    for estimator_name in FOREST_ESTIMATORS:
        yield check_1d_input, estimator_name, feature, feature_2d, labels
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    # (each class gets total weight 5 when summed across the three outputs).
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights:
    # a 100x sample_weight on class 1 must match class_weight={1: 100}.
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """class_weight/sample_weight equivalence checks per classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weights, clf_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output.
    # (Fixed stray docstring quotes that had been left in this comment.)
    # NOTE(review): relies on module-level X, y fixtures with labels {-1, 1}.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    # Preset string weighting on a multi-output target.
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    # Per-output list of explicit weight dicts.
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for subsample and balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    # 'subsample' is deprecated and warns; suppress via ignore_warnings.
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    """Multi-output class_weight smoke tests for each classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, clf_name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    # NOTE(review): relies on module-level X, y fixtures.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)
    # Warning warm_start with preset: presets are recomputed per fit, which
    # interacts badly with incrementally grown forests.
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)
    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
    # Incorrect length list for multi-output (one dict for two outputs)
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """class_weight error-path checks for each classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, clf_name
def check_warm_start(name, random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf_ws = None
    # Grow 5 trees, then warm-start up to 10.
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)
    # Reference: 10 trees grown in a single fit with the same seed.
    clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    clf_no_ws.fit(X, y)
    # Same per-tree seeds implies the same forest was grown.
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))
    assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Incremental warm-start checks for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start, estimator_name
def check_warm_start_clear(name):
    """With warm_start=False, fit() discards previously grown trees."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)
    candidate = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                                random_state=2)
    candidate.fit(X, y)  # inits state with a different seed
    candidate.set_params(warm_start=False, random_state=1)
    candidate.fit(X, y)  # clears old state; must now equal the reference
    assert_array_almost_equal(candidate.apply(X), reference.apply(X))
def test_warm_start_clear():
    """State-clearing checks for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, estimator_name
def check_warm_start_smaller_n_estimators(name):
    """Warm-started refit with fewer estimators must raise ValueError."""
    X, y = hastie_X, hastie_y
    forest = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1,
                                     warm_start=True)
    forest.fit(X, y)
    forest.set_params(n_estimators=4)
    assert_raises(ValueError, forest.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Shrinking-forest error checks for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, estimator_name
def check_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    clf.fit(X, y)
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    clf_2.fit(X, y)
    # Now clf_2 equals clf.
    # Refitting with an unchanged n_estimators must warn and leave the
    # existing trees untouched.
    clf_2.set_params(random_state=2)
    assert_warns(UserWarning, clf_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
    """No-op warm-start checks for every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, estimator_name
def check_warm_start_oob(name):
    """Warm start must (re)compute the OOB score when oob_score is enabled."""
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    reference = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                                random_state=1, bootstrap=True, oob_score=True)
    reference.fit(X, y)
    # Grow 5 trees without OOB, then warm-start up to 15 with OOB enabled.
    grown = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    grown.fit(X, y)
    grown.set_params(warm_start=True, oob_score=True, n_estimators=15)
    grown.fit(X, y)
    assert_true(hasattr(grown, 'oob_score_'))
    assert_equal(reference.oob_score_, grown.oob_score_)
    # The OOB score is computed even when no additional trees are needed.
    refit = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    refit.fit(X, y)
    assert_false(hasattr(refit, 'oob_score_'))
    refit.set_params(oob_score=True)
    ignore_warnings(refit.fit)(X, y)
    assert_equal(reference.oob_score_, refit.oob_score_)
def test_warm_start_oob():
    """OOB warm-start checks for classifiers, then regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_warm_start_oob, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_warm_start_oob, reg_name
def test_dtype_convert(n_classes=15):
    """String class labels must survive fit/predict without dtype mangling."""
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    predictions = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(predictions, y)
def check_decision_path(name):
    # decision_path must return a (n_samples, total_nodes) indicator matrix
    # plus per-estimator node offsets, consistent with apply().
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    indicator, n_nodes_ptr = est.decision_path(X)
    # Columns cover the concatenated nodes of all trees.
    assert_equal(indicator.shape[1], n_nodes_ptr[-1])
    assert_equal(indicator.shape[0], n_samples)
    # Offset differences equal each tree's node count.
    assert_array_equal(np.diff(n_nodes_ptr),
                       [e.tree_.node_count for e in est.estimators_])
    # Assert that leaves index are correct: every sample's decision path
    # must include the leaf that apply() reports for each tree.
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
                           for i, j in enumerate(leaves[:, est_id])]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
    """decision_path consistency checks for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_decision_path, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_decision_path, reg_name
| bsd-3-clause |
sudovijay/youtube-dl | youtube_dl/extractor/tweakers.py | 37 | 2227 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
xpath_text,
xpath_with_ns,
int_or_none,
float_or_none,
)
class TweakersIE(InfoExtractor):
    """Extractor for videos hosted on tweakers.net.

    Downloads the StreamOne XSPF playlist for a video id and maps its single
    track (title, description, thumbnail, duration, format locations) to the
    info dict expected by youtube-dl.
    """
    _VALID_URL = r'https?://tweakers\.net/video/(?P<id>\d+)'
    _TEST = {
        'url': 'https://tweakers.net/video/9926/new-nintendo-3ds-xl-op-alle-fronten-beter.html',
        'md5': '1b5afa817403bb5baa08359dca31e6df',
        'info_dict': {
            'id': '9926',
            'ext': 'mp4',
            'title': 'New Nintendo 3DS XL - Op alle fronten beter',
            'description': 'md5:f97324cc71e86e11c853f0763820e3ba',
            'thumbnail': 're:^https?://.*\.jpe?g$',
            'duration': 386,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The site exposes an XSPF playlist per video id.
        playlist = self._download_xml(
            'https://tweakers.net/video/s1playlist/%s/playlist.xspf' % video_id,
            video_id)
        # XML namespaces used by the playlist: standard XSPF plus the
        # StreamOne player extension (s1) carrying format metadata.
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }
        # Only the first (single) track of the playlist is used.
        track = playlist.find(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP))
        title = xpath_text(
            track, xpath_with_ns('./xspf:title', NS_MAP), 'title')
        description = xpath_text(
            track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
        thumbnail = xpath_text(
            track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
        # Playlist duration is in milliseconds; convert to seconds.
        duration = float_or_none(
            xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'),
            1000)
        # One format per <location>; quality metadata lives in s1:* attributes.
        formats = [{
            'url': location.text,
            'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
            'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
            'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
        } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| unlicense |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/contrib/slim/python/slim/nets/vgg.py | 25 | 10637 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.
  Args:
    weight_decay: The l2 regularization coefficient.
  Returns:
    An arg_scope.
  """
  # All conv/fc layers: ReLU activations, L2 weight decay, zero-initialized
  # biases; conv layers additionally default to SAME padding.
  # NOTE(review): `tf.zeros_initializer` is passed without parentheses; later
  # TF 1.x releases expect an instance (`tf.zeros_initializer()`) — confirm
  # against the TensorFlow version this project pins.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    # original_name_scope keeps the collection name unique per instantiation.
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    # NOTE(review): unlike vgg_16/vgg_19, fully_connected is not listed here;
    # harmless since all FC layers below are expressed as conv2d anyway.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-A stacks: 1-1-2-2-2 conv layers, each stack followed by 2x2 pool.
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      # Final 1x1 conv produces raw class scores (no activation/normalizer).
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Collapse the [1, 1] spatial dims so logits are [batch, num_classes].
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    # original_name_scope keeps the collection name unique per instantiation.
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-D stacks: 2-2-3-3-3 conv layers, each stack followed by 2x2 pool.
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      # Final 1x1 conv produces raw class scores (no activation/normalizer).
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Collapse the [1, 1] spatial dims so logits are [batch, num_classes].
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19'):
  """Oxford Net VGG 19-Layers version E Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
  To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
    # Consistency fix: vgg_a and vgg_16 derive the collection name from
    # sc.original_name_scope, which is unique per instantiation even when the
    # same variable scope is entered more than once; using sc.name here could
    # merge end points of distinct instantiations into one collection.
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      # VGG-E stacks: 2-2-4-4-4 conv layers, each stack followed by 2x2 pool.
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      # Final 1x1 conv produces raw class scores (no activation/normalizer).
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Collapse the [1, 1] spatial dims so logits are [batch, num_classes].
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
vgg_19.default_image_size = 224

# Alias
vgg_d = vgg_16
vgg_e = vgg_19
| agpl-3.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/numpy/distutils/command/autodist.py | 148 | 2048 | """This module implements additional tests ala autoconf which can be useful.
"""
from __future__ import division, absolute_import, print_function
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
    """Return the inline identifier (may be empty)."""
    cmd._check_compiler()
    # C snippet exercising the candidate keyword on both a static and a
    # non-static function; the __cplusplus guard restricts it to plain C.
    template = """
#ifndef __cplusplus
static %(inline)s int static_func (void)
{
    return 0;
}
%(inline)s int nostatic_func (void)
{
    return 0;
}
#endif"""
    for candidate in ('inline', '__inline__', '__inline'):
        if cmd.try_compile(template % {'inline': candidate}, None, None):
            return candidate
    return ''
def check_restrict(cmd):
    """Return the restrict identifier (may be empty)."""
    cmd._check_compiler()
    # Probe the candidate keyword in a pointer-parameter declaration.
    template = """
static int static_func (char * %(restrict)s a)
{
    return 0;
}
"""
    for candidate in ('restrict', '__restrict__', '__restrict'):
        if cmd.try_compile(template % {'restrict': candidate}, None, None):
            return candidate
    return ''
def check_compiler_gcc4(cmd):
    """Return True if the C compiler is GCC 4.x."""
    cmd._check_compiler()
    # The preprocessor guard makes compilation fail unless __GNUC__ >= 4,
    # so the try_compile result directly answers the question.
    probe = """
int
main()
{
#if (! defined __GNUC__) || (__GNUC__ < 4)
#error gcc >= 4 required
#endif
    return 0;
}
"""
    return cmd.try_compile(probe, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
    """Return True if the given function attribute is supported."""
    cmd._check_compiler()
    # Promote -Wattributes to an error so an unknown attribute fails the
    # compile instead of merely producing a warning.
    probe = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s %s(void*);
int
main()
{
    return 0;
}
""" % (attribute, name)
    return cmd.try_compile(probe, None, None) != 0
def check_gcc_variable_attribute(cmd, attribute):
    """Return True if the given variable attribute is supported."""
    cmd._check_compiler()
    # Same -Wattributes-as-error trick as the function-attribute probe,
    # applied to a global variable declaration.
    probe = """
#pragma GCC diagnostic error "-Wattributes"
#pragma clang diagnostic error "-Wattributes"
int %s foo;
int
main()
{
    return 0;
}
""" % (attribute, )
    return cmd.try_compile(probe, None, None) != 0
| agpl-3.0 |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/TargetTxtClassObject.py | 1 | 8415 | ## @file
# This file is used to define each component of Target.txt file
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import EdkLogger
import DataType
from BuildToolError import *
import GlobalData
from Common.LongFilePathSupport import OpenLongFilePath as open
gDefaultTargetTxtFile = "target.txt"
## TargetTxtClassObject
#
# This class defined content used in file target.txt
#
# @param object: Inherited from object class
# @param Filename: Input value for full path of target.txt
#
# @var TargetTxtDictionary: To store keys and values defined in target.txt
#
class TargetTxtClassObject(object):
    # In-memory representation of a parsed target.txt build-configuration
    # file (Python 2 module).
    def __init__(self, Filename = None):
        # Parsed key/value pairs; path-valued keys hold strings, the
        # TARGET/TOOL_CHAIN_TAG/TARGET_ARCH keys hold whitespace-split lists.
        self.TargetTxtDictionary = {
            DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM: '',
            DataType.TAB_TAT_DEFINES_ACTIVE_MODULE: '',
            DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF: '',
            DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER: '',
            DataType.TAB_TAT_DEFINES_TARGET: [],
            DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG: [],
            DataType.TAB_TAT_DEFINES_TARGET_ARCH: [],
            DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF: '',
        }
        # Directory containing target.txt; used to resolve the relative
        # TOOL_CHAIN_CONF / BUILD_RULE_CONF paths in ConvertTextFileToDict.
        self.ConfDirectoryPath = ""
        if Filename != None:
            self.LoadTargetTxtFile(Filename)

    ## LoadTargetTxtFile
    #
    # Load target.txt file and parse it, return a set structure to store keys and values
    #
    # @param Filename:  Input value for full path of target.txt
    #
    # @retval set() A set structure to store keys and values
    # @retval 1     Error happenes in parsing
    #
    def LoadTargetTxtFile(self, Filename):
        if os.path.exists(Filename) and os.path.isfile(Filename):
            return self.ConvertTextFileToDict(Filename, '#', '=')
        else:
            # EdkLogger.error raises; the return is defensive/unreachable.
            EdkLogger.error("Target.txt Parser", FILE_NOT_FOUND, ExtraData=Filename)
            return 1

    ## ConvertTextFileToDict
    #
    # Convert a text file to a dictionary of (name:value) pairs.
    # The data is saved to self.TargetTxtDictionary
    #
    # @param FileName:             Text filename
    # @param CommentCharacter:     Comment char, be used to ignore comment content
    # @param KeySplitCharacter:    Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
    #
    # @retval 0 Convert successfully
    # @retval 1 Open file failed
    #
    def ConvertTextFileToDict(self, FileName, CommentCharacter, KeySplitCharacter):
        F = None
        try:
            F = open(FileName, 'r')
            self.ConfDirectoryPath = os.path.dirname(FileName)
        except:
            # EdkLogger.error raises, so the close below only runs if error
            # reporting itself returns; F is closed to avoid a leak.
            EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=FileName)
            if F != None:
                F.close()

        for Line in F:
            Line = Line.strip()
            # Skip comments and blank lines.
            if Line.startswith(CommentCharacter) or Line == '':
                continue

            # Split only on the first separator so values may contain '='.
            LineList = Line.split(KeySplitCharacter, 1)
            Key = LineList[0].strip()
            if len(LineList) == 2:
                Value = LineList[1].strip()
            else:
                Value = ""

            if Key in [DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM, DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF, \
                       DataType.TAB_TAT_DEFINES_ACTIVE_MODULE, DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF]:
                # Normalize Windows path separators.
                self.TargetTxtDictionary[Key] = Value.replace('\\', '/')
                if Key == DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF and self.TargetTxtDictionary[Key]:
                    if self.TargetTxtDictionary[Key].startswith("Conf/"):
                        Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                        if not os.path.exists(Tools_Def) or not os.path.isfile(Tools_Def):
                            # If Conf/Conf does not exist, try just the Conf/ directory
                            Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].replace("Conf/", "", 1).strip())
                    else:
                        # The File pointed to by TOOL_CHAIN_CONF is not in a Conf/ directory
                        Tools_Def = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                    self.TargetTxtDictionary[Key] = Tools_Def
                if Key == DataType.TAB_TAT_DEFINES_BUILD_RULE_CONF and self.TargetTxtDictionary[Key]:
                    if self.TargetTxtDictionary[Key].startswith("Conf/"):
                        Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                        if not os.path.exists(Build_Rule) or not os.path.isfile(Build_Rule):
                            # If Conf/Conf does not exist, try just the Conf/ directory
                            Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].replace("Conf/", "", 1).strip())
                    else:
                        # The File pointed to by BUILD_RULE_CONF is not in a Conf/ directory
                        Build_Rule = os.path.join(self.ConfDirectoryPath, self.TargetTxtDictionary[Key].strip())
                    self.TargetTxtDictionary[Key] = Build_Rule
            elif Key in [DataType.TAB_TAT_DEFINES_TARGET, DataType.TAB_TAT_DEFINES_TARGET_ARCH, \
                         DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]:
                # Multi-valued keys are whitespace-separated lists.
                self.TargetTxtDictionary[Key] = Value.split()
            elif Key == DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER:
                try:
                    # Validation only: V is intentionally discarded and the
                    # raw string is stored (base 0 accepts 0x... etc.).
                    V = int(Value, 0)
                except:
                    EdkLogger.error("build", FORMAT_INVALID, "Invalid number of [%s]: %s." % (Key, Value),
                                    File=FileName)
                self.TargetTxtDictionary[Key] = Value
            #elif Key not in GlobalData.gGlobalDefines:
            #    GlobalData.gGlobalDefines[Key] = Value

        F.close()
        return 0
## Print the dictionary
#
# Print all items of dictionary one by one
#
# @param Dict: The dictionary to be printed
#
def printDict(Dict):
    # Debug helper: dump every non-empty entry of Dict to stdout
    # (Python 2 print statement).
    if Dict != None:
        KeyList = Dict.keys()
        for Key in KeyList:
            if Dict[Key] != '':
                print Key + ' = ' + str(Dict[Key])
## Print the dictionary
#
# Print the items of dictionary which matched with input key
#
# @param list: The dictionary to be printed
# @param key: The key of the item to be printed
#
def printList(Key, List):
if type(List) == type([]):
if len(List) > 0:
if Key.find(TAB_SPLIT) != -1:
print "\n" + Key
for Item in List:
print Item
## TargetTxtDict
#
# Load target.txt in input Conf dir
#
# @param ConfDir: Conf dir
#
# @retval Target An instance of TargetTxtClassObject() with loaded target.txt
#
def TargetTxtDict(ConfDir):
    # Build and return a TargetTxtClassObject loaded from <ConfDir>/target.txt.
    # The VBOX_TARGET_CONF environment variable, when set, overrides that
    # default location (VirtualBox-specific behaviour).
    TargetObj = TargetTxtClassObject()
    # VBox begin
    Override = os.environ.get('VBOX_TARGET_CONF')
    if Override is not None:
        TargetObj.LoadTargetTxtFile(os.path.abspath(Override))
        return TargetObj
    # VBox end
    TargetObj.LoadTargetTxtFile(os.path.normpath(os.path.join(ConfDir, gDefaultTargetTxtFile)))
    return TargetObj
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
# Ad-hoc smoke test: load target.txt from $WORKSPACE/Conf and dump a few
# parsed values (Python 2 print statements).
if __name__ == '__main__':
    pass
    Target = TargetTxtDict(os.getenv("WORKSPACE"))
    print Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
    print Target.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
    print Target.TargetTxtDictionary
| gpl-2.0 |
BehavioralInsightsTeam/edx-platform | lms/djangoapps/course_api/blocks/transformers/milestones.py | 14 | 6712 | """
Milestones Transformer
"""
import logging
from django.conf import settings
from edx_proctoring.api import get_attempt_status_summary
from edx_proctoring.exceptions import ProctoredExamNotFoundException
from openedx.core.djangoapps.content.block_structure.transformer import (
BlockStructureTransformer,
)
from student.models import EntranceExamConfiguration
from util import milestones_helpers
log = logging.getLogger(__name__)
class MilestonesAndSpecialExamsTransformer(BlockStructureTransformer):
    """
    A transformer that handles both milestones and special (timed) exams.

    It includes or excludes all unfulfilled milestones from the student view
    based on the value of `include_gated_sections`.  An entrance exam is
    considered a milestone, and is not considered a "special exam".

    It also includes or excludes all special (timed) exams (timed, proctored,
    practice proctored) in/from the student view, based on the value of
    `include_special_exams`.
    """
    WRITE_VERSION = 1
    READ_VERSION = 1

    @classmethod
    def name(cls):
        # Registered transformer name (kept short for stored block structures).
        return "milestones"

    def __init__(self, include_special_exams=True, include_gated_sections=True):
        self.include_special_exams = include_special_exams
        self.include_gated_sections = include_gated_sections

    @classmethod
    def collect(cls, block_structure):
        """
        Computes any information for each XBlock that's necessary to execute
        this transformer's transform method.

        Arguments:
            block_structure (BlockStructureCollectedData)
        """
        # Fields consulted by is_special_exam() and get_required_content().
        block_structure.request_xblock_fields('is_proctored_enabled')
        block_structure.request_xblock_fields('is_practice_exam')
        block_structure.request_xblock_fields('is_timed_exam')
        block_structure.request_xblock_fields('entrance_exam_id')

    def transform(self, usage_info, block_structure):
        """
        Modify block structure according to the behavior of milestones and special exams.
        """
        required_content = self.get_required_content(usage_info, block_structure)

        def user_gated_from_block(block_key):
            """
            Checks whether the user is gated from accessing this block, first via special exams,
            then via a general milestones check.
            """
            if usage_info.has_staff_access:
                # Staff bypass all gating.
                return False
            elif self.gated_by_required_content(block_key, block_structure, required_content):
                return True
            elif not self.include_gated_sections and self.has_pending_milestones_for_user(block_key, usage_info):
                return True
            elif (settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
                  (self.is_special_exam(block_key, block_structure) and
                   not self.include_special_exams)):
                return True
            return False

        for block_key in block_structure.topological_traversal():
            if user_gated_from_block(block_key):
                # NOTE(review): second positional argument to remove_block
                # presumably controls descendant handling -- confirm against
                # BlockStructure API before relying on it.
                block_structure.remove_block(block_key, False)
            elif self.is_special_exam(block_key, block_structure):
                self.add_special_exam_info(block_key, block_structure, usage_info)

    @staticmethod
    def is_special_exam(block_key, block_structure):
        """
        Test whether the block is a special exam.
        """
        return (
            block_structure.get_xblock_field(block_key, 'is_proctored_enabled') or
            block_structure.get_xblock_field(block_key, 'is_practice_exam') or
            block_structure.get_xblock_field(block_key, 'is_timed_exam')
        )

    @staticmethod
    def has_pending_milestones_for_user(block_key, usage_info):
        """
        Test whether the current user has any unfulfilled milestones preventing
        them from accessing this block.
        """
        return bool(milestones_helpers.get_course_content_milestones(
            unicode(block_key.course_key),
            unicode(block_key),
            'requires',
            usage_info.user.id
        ))

    # TODO: As part of a cleanup effort, this transformer should be split into
    # MilestonesTransformer and SpecialExamsTransformer, which are completely independent.
    def add_special_exam_info(self, block_key, block_structure, usage_info):
        """
        For special exams, add the special exam information to the course blocks.
        """
        special_exam_attempt_context = None
        try:
            # Calls into edx_proctoring subsystem to get relevant special exam information.
            # This will return None, if (user, course_id, content_id) is not applicable.
            special_exam_attempt_context = get_attempt_status_summary(
                usage_info.user.id,
                unicode(block_key.course_key),
                unicode(block_key)
            )
        except ProctoredExamNotFoundException as ex:
            # Best-effort: log and leave the block without special-exam info.
            log.exception(ex)

        if special_exam_attempt_context:
            # This user has special exam context for this block so add it.
            block_structure.set_transformer_block_field(
                block_key,
                self,
                'special_exam_info',
                special_exam_attempt_context,
            )

    @staticmethod
    def get_required_content(usage_info, block_structure):
        """
        Get the required content for the course.

        This takes into account if the user can skip the entrance exam.
        """
        course_key = block_structure.root_block_usage_key.course_key
        user_can_skip_entrance_exam = EntranceExamConfiguration.user_can_skip_entrance_exam(usage_info.user, course_key)
        required_content = milestones_helpers.get_required_content(course_key, usage_info.user)

        if not required_content:
            return required_content

        if user_can_skip_entrance_exam:
            # remove the entrance exam from required content
            entrance_exam_id = block_structure.get_xblock_field(block_structure.root_block_usage_key, 'entrance_exam_id')
            required_content = [content for content in required_content if not content == entrance_exam_id]
        return required_content

    @staticmethod
    def gated_by_required_content(block_key, block_structure, required_content):
        """
        Returns True if the current block associated with the block_key should be gated by the given required_content.
        Returns False otherwise.
        """
        if not required_content:
            return False

        # Only chapter-level blocks are gated; any chapter not in the
        # required-content list is hidden until requirements are met.
        if block_key.block_type == 'chapter' and unicode(block_key) not in required_content:
            return True
        return False
| agpl-3.0 |
HalcyonChimera/osf.io | website/search_migration/__init__.py | 2 | 34031 | JSON_UPDATE_NODES_SQL = """
SELECT json_agg(
json_build_object(
'_type', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.URL IS NOT NULL
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', NODE_GUID._id
, '_op_type', 'update'
, 'doc', json_build_object(
'contributors', (SELECT json_agg(json_build_object(
'url', CASE
WHEN U.is_active
THEN '/' || USER_GUID._id || '/'
ELSE NULL
END
, 'fullname', U.fullname
))
FROM osf_osfuser AS U
INNER JOIN osf_contributor AS CONTRIB
ON (U.id = CONTRIB.user_id)
LEFT OUTER JOIN osf_guid AS USER_GUID
ON (U.id = USER_GUID.object_id AND (USER_GUID.content_type_id = (SELECT id FROM django_content_type WHERE model = 'osfuser')))
WHERE (CONTRIB.node_id = N.id AND CONTRIB.visible = TRUE))
, 'extra_search_terms', CASE
WHEN strpos(N.title, '-') + strpos(N.title, '_') + strpos(N.title, '.') > 0
THEN translate(N.title, '-_.', ' ')
ELSE ''
END
, 'normalized_title', N.title
, 'registered_date', N.registered_date
, 'id', NODE_GUID._id
, 'category', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.URL IS NOT NULL
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, 'title', N.title
, 'parent_id', PARENT_GUID._id
, 'embargo_end_date', EMBARGO.DATA ->> 'end_date'
, 'is_pending_registration', CASE
WHEN N.type = 'osf.registration'
THEN REGISTRATION_APPROVAL.PENDING
ELSE FALSE
END
, 'is_pending_embargo', EMBARGO.DATA ->> 'pending'
, 'is_registration', N.type = 'osf.registration'
, 'is_pending_retraction', RETRACTION.state = 'pending'
, 'is_retracted', RETRACTION.state = 'approved'
, 'preprint_url', PREPRINT.URL
, 'boost', CASE
WHEN N.type = 'osf.node'
THEN 2
ELSE 1
END
, 'public', N.is_public
, 'description', N.description
, 'tags', (CASE
WHEN TAGS.names IS NOT NULL
THEN TAGS.names
ELSE
'{{}}'::TEXT[]
END)
, 'affiliated_institutions', (SELECT array_agg(INST.name)
FROM osf_institution AS INST
INNER JOIN osf_abstractnode_affiliated_institutions
ON (INST.id = osf_abstractnode_affiliated_institutions.institution_id)
WHERE osf_abstractnode_affiliated_institutions.abstractnode_id = N.id)
, 'license', json_build_object(
'text', LICENSE.text
, 'name', LICENSE.name
, 'id', LICENSE.license_id
, 'copyright_holders', LICENSE.copyright_holders
, 'year', LICENSE.year
)
, 'url', '/' || NODE_GUID._id || '/'
, 'date_created', N.created
, 'wikis', CASE
WHEN RETRACTION.state != 'approved'
THEN
(SELECT json_agg(json_build_object(
translate(WP.page_name, '.', ' '), WV."content"
))
FROM addons_wiki_wikipage AS WP
LEFT JOIN LATERAL (
SELECT
content,
identifier
FROM addons_wiki_wikiversion
WHERE addons_wiki_wikiversion.wiki_page_id = WP.id
ORDER BY identifier DESC
LIMIT 1
) WV ON TRUE
WHERE WP.node_id = N.id
AND WP.deleted IS NULL)
ELSE
'{{}}'::JSON
END
)
)
)
FROM osf_abstractnode AS N
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) NODE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) PARENT_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT array_agg(TAG.name) as names
FROM osf_tag AS TAG
INNER JOIN osf_abstractnode_tags ON (TAG.id = osf_abstractnode_tags.tag_id)
WHERE (TAG.system = FALSE AND osf_abstractnode_tags.abstractnode_id = N.id)
LIMIT 1
) TAGS ON TRUE
LEFT JOIN LATERAL (
SELECT
osf_nodelicense.license_id,
osf_nodelicense.name,
osf_nodelicense.text,
osf_nodelicenserecord.year,
osf_nodelicenserecord.copyright_holders
FROM osf_nodelicenserecord
INNER JOIN osf_abstractnode ON (osf_nodelicenserecord.id = osf_abstractnode.node_license_id)
LEFT OUTER JOIN osf_nodelicense ON (osf_nodelicenserecord.node_license_id = osf_nodelicense.id)
WHERE osf_abstractnode.id = N.id
) LICENSE ON TRUE
LEFT JOIN LATERAL (SELECT (
CASE WHEN N.type = 'osf.registration'
THEN
(CASE WHEN N.retraction_id IS NOT NULL
THEN
(SELECT state
FROM osf_retraction
WHERE id = N.retraction_id)
ELSE
(WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
'' :: VARCHAR AS state
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
R.state
FROM ascendants AS D
INNER JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
INNER JOIN osf_abstractnode AS A
ON D.child_id = A.id
INNER JOIN osf_retraction AS R
ON A.retraction_id = R.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT state
FROM ascendants
WHERE child_id = N.id
AND state IS NOT NULL
ORDER BY LEVEL ASC
LIMIT 1)
END)
ELSE
(SELECT '' :: VARCHAR AS state)
END
)) RETRACTION ON TRUE
LEFT JOIN LATERAL (
SELECT (
CASE WHEN N.type = 'osf.registration'
THEN (
CASE WHEN N.embargo_id IS NOT NULL
THEN (
SELECT json_build_object(
'pending', state = 'unapproved',
'end_date',
CASE WHEN state = 'approved'
THEN
TO_CHAR(end_date, 'Day, Mon DD, YYYY')
ELSE
NULL
END
) AS DATA
FROM osf_retraction
WHERE id = N.retraction_id
)
ELSE (
WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
'' :: VARCHAR AS state,
NULL :: TIMESTAMP WITH TIME ZONE AS end_date
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
E.state,
E.end_date
FROM ascendants AS D
JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
JOIN osf_abstractnode AS A
ON D.child_id = A.id
JOIN osf_embargo AS E
ON A.retraction_id = E.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT json_build_object(
'pending', state = 'unapproved',
'end_date',
CASE WHEN state = 'approved'
THEN
TO_CHAR(end_date, 'Day, Mon DD, YYYY')
ELSE
NULL
END
) AS DATA
FROM ascendants
WHERE child_id = N.id
AND state IS NOT NULL
ORDER BY LEVEL ASC
LIMIT 1
) END
)
ELSE (
SELECT json_build_object(
'pending', FALSE,
'end_date', NULL
) AS DATA
) END
)
) EMBARGO ON TRUE
LEFT JOIN LATERAL ( SELECT (
CASE WHEN N.type = 'osf.registration' AND N.registration_approval_id IS NOT NULL
THEN (
SELECT state = 'unapproved' AS PENDING
FROM osf_registrationapproval
WHERE id = N.retraction_id
)
ELSE (
SELECT FALSE AS PENDING
) END)
) REGISTRATION_APPROVAL ON TRUE
LEFT JOIN LATERAL (
SELECT
CASE WHEN ((osf_abstractprovider.domain_redirect_enabled AND osf_abstractprovider.domain IS NOT NULL) OR
osf_abstractprovider._id = 'osf')
THEN
'/' || (SELECT G._id
FROM osf_guid G
WHERE (G.object_id = P.id)
AND (G.content_type_id = (SELECT id FROM django_content_type WHERE model = 'preprintservice'))
ORDER BY created ASC, id ASC
LIMIT 1) || '/'
ELSE
'/preprints/' || osf_abstractprovider._id || '/' || (SELECT G._id
FROM osf_guid G
WHERE (G.object_id = P.id)
AND (G.content_type_id = (SELECT id FROM django_content_type WHERE model = 'preprintservice'))
ORDER BY created ASC, id ASC
LIMIT 1) || '/'
END AS URL
FROM osf_preprintservice P
INNER JOIN osf_abstractprovider ON P.provider_id = osf_abstractprovider.id
WHERE P.node_id = N.id
AND P.machine_state != 'initial' -- is_preprint
AND N.preprint_file_id IS NOT NULL
AND N.is_public = TRUE
AND N._is_preprint_orphan != TRUE
ORDER BY P.is_published DESC, P.created DESC
LIMIT 1
) PREPRINT ON TRUE
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(N.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(N.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(N.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR N.id IN -- Comes from website.settings.DO_NOT_INDEX_LIST
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (N.id IN -- node.archiving
(SELECT AJ.dst_node_id -- May need to be made recursive as AJ table grows
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_UPDATE_FILES_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'file'
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', F._id
, '_op_type', 'update'
, 'doc', json_build_object(
'id', F._id
, 'deep_url', CASE WHEN F.provider = 'osfstorage'
THEN '/' || (NODE.DATA ->> 'guid') || '/files/' || F.provider || '/' || F._id
ELSE '/' || (NODE.DATA ->> 'guid') || '/files/' || F.provider || F._path
END
, 'guid_url', CASE WHEN FILE_GUID._id IS NOT NULL
THEN '/' || FILE_GUID._id || '/'
ELSE NULL
END
, 'tags', (CASE
WHEN TAGS.names IS NOT NULL
THEN TAGS.names
ELSE
'{{}}'::TEXT[]
END)
, 'name', F.name
, 'category', 'file'
, 'node_url', '/' || (NODE.DATA ->> 'guid') || '/'
, 'node_title', NODE.DATA ->> 'title'
, 'parent_id', NODE.DATA ->> 'parent_guid'
, 'is_registration', NODE.DATA ->> 'is_registration' = 'true' -- Weirdness from the lateral join causes this to be a string
, 'is_retracted', NODE.DATA ->> 'is_retracted' = 'true' -- Weirdness from the lateral join causes this to be a string
, 'extra_search_terms', CASE WHEN strpos(F.name, '-') + strpos(F.name, '_') + strpos(F.name, '.') > 0
THEN translate(F.name, '-_.', ' ')
ELSE ''
END
)
)
)
FROM osf_basefilenode AS F
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = F.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'basefilenode')
LIMIT 1
) FILE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT array_agg(TAG.name) AS names
FROM osf_tag AS TAG
INNER JOIN osf_basefilenode_tags ON (TAG.id = osf_basefilenode_tags.tag_id)
WHERE (TAG.system = FALSE AND osf_basefilenode_tags.basefilenode_id = F.id)
) TAGS ON TRUE
LEFT JOIN LATERAL (
SELECT json_build_object(
'is_registration', (CASE WHEN N.type = 'osf.registration'
THEN TRUE
ELSE FALSE
END)
, 'title', N.title
, 'guid', (SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id
FROM django_content_type
WHERE model = 'abstractnode')
LIMIT 1)
, 'parent_guid', (SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id
FROM django_content_type
WHERE model = 'abstractnode')
LIMIT 1)
, 'is_retracted', (CASE WHEN N.type = 'osf.registration'
THEN
(CASE WHEN N.retraction_id IS NOT NULL
THEN
(SELECT state = 'approved'
FROM osf_retraction
WHERE id = N.retraction_id)
ELSE
(WITH RECURSIVE ascendants AS (
SELECT
parent_id,
child_id,
1 AS LEVEL,
ARRAY [child_id] AS cids,
FALSE AS is_retracted
FROM osf_noderelation
WHERE is_node_link IS FALSE AND child_id = N.id
UNION ALL
SELECT
S.parent_id,
D.child_id,
D.level + 1,
D.cids || S.child_id,
R.state = 'approved' AS is_retracted
FROM ascendants AS D
INNER JOIN osf_noderelation AS S
ON D.parent_id = S.child_id
INNER JOIN osf_abstractnode AS A
ON D.child_id = A.id
INNER JOIN osf_retraction AS R
ON A.retraction_id = R.id
WHERE S.is_node_link IS FALSE
AND N.id = ANY (cids)
) SELECT is_retracted
FROM ascendants
WHERE child_id = N.id
ORDER BY is_retracted DESC -- Put TRUE at the top
LIMIT 1)
END)
ELSE
FALSE
END)
) AS DATA
FROM osf_abstractnode N
WHERE (N.id = F.target_object_id AND (
SELECT id FROM "django_content_type" WHERE (
"django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'
)) = F.target_content_type_id)
LIMIT 1
) NODE ON TRUE
WHERE name IS NOT NULL
AND name != ''
AND target_object_id = ANY (SELECT id
FROM osf_abstractnode
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR "osf_abstractnode"."id" IN
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (osf_abstractnode.id IN
(SELECT AJ.dst_node_id
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
AND target_content_type_id = (SELECT id FROM "django_content_type" WHERE ("django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_UPDATE_USERS_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'user'
, '_index', '{index}'
, 'doc_as_upsert', TRUE
, '_id', USER_GUID._id
, '_op_type', 'update'
, 'doc', json_build_object(
'id', USER_GUID._id
, 'user', U.fullname
, 'normalized_user', U.fullname
, 'normalized_names', json_build_object(
'fullname', U.fullname
, 'given_name', U.given_name
, 'family_name', U.family_name
, 'middle_names', U.middle_names
, 'suffix', U.suffix
)
, 'names', json_build_object(
'fullname', U.fullname
, 'given_name', U.given_name
, 'family_name', U.family_name
, 'middle_names', U.middle_names
, 'suffix', U.suffix
)
, 'job', CASE
WHEN U.jobs :: JSON -> 0 -> 'institution' IS NOT NULL
THEN
(U.jobs :: JSON -> 0 -> 'institution') :: TEXT
ELSE
''
END
, 'job_title', (CASE
WHEN U.jobs :: JSON -> 0 -> 'title' IS NOT NULL
THEN
(U.jobs :: JSON -> 0 -> 'title') :: TEXT
ELSE
''
END)
, 'all_jobs', (SELECT array_agg(DISTINCT (JOB :: JSON -> 'institution') :: TEXT)
FROM
(SELECT json_array_elements(jobs :: JSON) AS JOB
FROM osf_osfuser
WHERE id = U.id
) AS JOBS)
, 'school', (CASE
WHEN U.schools :: JSON -> 0 -> 'institution' IS NOT NULL
THEN
(U.schools :: JSON -> 0 -> 'institution') :: TEXT
ELSE
''
END)
, 'all_schools', (SELECT array_agg(DISTINCT (SCHOOL :: JSON -> 'institution') :: TEXT)
FROM
(SELECT json_array_elements(schools :: JSON) AS SCHOOL
FROM osf_osfuser
WHERE id = U.id
) AS SCHOOLS)
, 'category', 'user'
, 'degree', (CASE
WHEN U.schools :: JSON -> 0 -> 'degree' IS NOT NULL
THEN
(U.schools :: JSON -> 0 -> 'degree') :: TEXT
ELSE
''
END)
, 'social', (SELECT json_object_agg(
key,
(
CASE
WHEN key = 'orcid'
THEN 'http://orcid.org/' || value
WHEN key = 'github'
THEN 'http://github.com/' || value
WHEN key = 'scholar'
THEN 'http://scholar.google.com/citations?user=' || value
WHEN key = 'twitter'
THEN 'http://twitter.com/' || value
WHEN key = 'profileWebsites'
THEN value
WHEN key = 'linkedIn'
THEN 'https://www.linkedin.com/' || value
WHEN key = 'impactStory'
THEN 'https://impactstory.org/u/' || value
WHEN key = 'researcherId'
THEN 'http://researcherid.com/rid/' || value
WHEN key = 'researchGate'
THEN 'https://researchgate.net/profile/' || value
WHEN key = 'academiaInstitution'
THEN 'https://' || value
WHEN key = 'academiaProfileID'
THEN '.academia.edu/' || value
WHEN key = 'baiduScholar'
THEN 'http://xueshu.baidu.com/scholarID/' || value
WHEN key = 'ssrn'
THEN 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id=' || value
END
))
FROM jsonb_each_text(
(SELECT social
FROM osf_osfuser
WHERE id = U.id)
)
WHERE value IS NOT NULL
AND value != ''
AND value != '[]'
)
, 'boost', 2
)
)
)
FROM osf_osfuser AS U
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = U.id
AND content_type_id = ANY (SELECT id
FROM django_content_type
WHERE model = 'osfuser')
LIMIT 1
) USER_GUID ON TRUE
WHERE is_active = TRUE
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_NODES_SQL = """
SELECT json_agg(
json_build_object(
'_type', CASE
WHEN N.type = 'osf.registration'
THEN 'registration'
WHEN PREPRINT.is_preprint > 0
THEN 'preprint'
WHEN PARENT_GUID._id IS NULL
THEN 'project'
ELSE 'component'
END
, '_index', '{index}'
, '_id', NODE_GUID._id
, '_op_type', 'delete'
)
)
FROM osf_abstractnode AS N
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = N.id
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) NODE_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = (
SELECT parent_id
FROM osf_noderelation
WHERE child_id = N.id
AND is_node_link = FALSE
LIMIT 1)
AND content_type_id = (SELECT id FROM django_content_type WHERE model = 'abstractnode')
LIMIT 1
) PARENT_GUID ON TRUE
LEFT JOIN LATERAL (
SELECT COUNT(P.id) as is_preprint
FROM osf_preprintservice P
WHERE P.node_id = N.id
AND P.machine_state != 'initial'
AND N.preprint_file_id IS NOT NULL
AND N.is_public = TRUE
AND N._is_preprint_orphan != TRUE
LIMIT 1
) PREPRINT ON TRUE
WHERE NOT ((TYPE = 'osf.node' OR TYPE = 'osf.registration')
AND N.is_public IS TRUE
AND N.is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
AND NOT (UPPER(N.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(N.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(N.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR N.id IN -- Comes from website.settings.DO_NOT_INDEX_LIST
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (N.id IN -- node.archiving
(SELECT AJ.dst_node_id -- May need to be made recursive as AJ table grows
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_FILES_SQL = """
SELECT json_agg(json_build_object(
'_type', 'file'
, '_index', '{index}'
, '_id', F._id
, '_op_type', 'delete'
))
FROM osf_basefilenode AS F
WHERE NOT (name IS NOT NULL
AND name != ''
AND target_object_id = ANY (SELECT id
FROM osf_abstractnode
WHERE (TYPE = 'osf.node' OR TYPE = 'osf.registration' OR TYPE = 'osf.quickfilesnode')
AND is_public IS TRUE
AND is_deleted IS FALSE
AND (spam_status IS NULL OR NOT (spam_status = 2 or (spam_status = 1 AND {spam_flagged_removed_from_search})))
-- settings.SPAM_FLAGGED_REMOVE_FROM_SEARCH
-- node.archiving or is_qa_node
AND NOT (UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 201%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%Bulk stress 202%') OR UPPER(osf_abstractnode.title::text) LIKE UPPER('%OSF API Registration test%') -- is_qa_node
OR "osf_abstractnode"."id" IN
(SELECT THRUTAGS.abstractnode_id
FROM osf_abstractnode_tags THRUTAGS
INNER JOIN osf_tag TAGS ON (THRUTAGS.tag_id = TAGS.id)
WHERE (TAGS.name = 'qatest'
OR TAGS.name = 'qa test')))
AND NOT (osf_abstractnode.id IN
(SELECT AJ.dst_node_id
FROM osf_archivejob AJ
WHERE (AJ.status != 'FAILURE' AND AJ.status != 'SUCCESS'
AND AJ.dst_node_id IS NOT NULL)))
)
)
AND target_content_type_id = (SELECT id FROM "django_content_type" WHERE ("django_content_type"."model" = 'abstractnode' AND "django_content_type"."app_label" = 'osf'))
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
JSON_DELETE_USERS_SQL = """
SELECT json_agg(
json_build_object(
'_type', 'user'
, '_index', '{index}'
, '_id', USER_GUID._id
, '_op_type', 'delete'
)
)
FROM osf_osfuser AS U
LEFT JOIN LATERAL (
SELECT _id
FROM osf_guid
WHERE object_id = U.id
AND content_type_id = ANY (SELECT id
FROM django_content_type
WHERE model = 'osfuser')
LIMIT 1
) USER_GUID ON TRUE
WHERE is_active != TRUE
AND id > {page_start}
AND id <= {page_end}
LIMIT 1;
"""
| apache-2.0 |
revmischa/boto | boto/ec2/autoscale/tag.py | 173 | 3379 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Tag(object):
    """
    A key/value tag attached to an AutoScalingGroup resource.

    :ivar key: The tag's key.
    :ivar value: The tag's value.
    :ivar propagate_at_launch: If True, instances launched by the group
        after the tag is created inherit the tag.
    :ivar resource_id: Name of the autoscaling group the tag belongs to.
    :ivar resource_type: The only supported resource type at this time
        is "auto-scaling-group".
    """

    # SAX element name -> attribute written verbatim by endElement.
    # 'PropagateAtLaunch' is handled separately because it must be parsed
    # into a boolean.
    _SIMPLE_ELEMENTS = {
        'Key': 'key',
        'Value': 'value',
        'ResourceId': 'resource_id',
        'ResourceType': 'resource_type',
    }

    def __init__(self, connection=None, key=None, value=None,
                 propagate_at_launch=False, resource_id=None,
                 resource_type='auto-scaling-group'):
        self.connection = connection
        self.key = key
        self.value = value
        self.propagate_at_launch = propagate_at_launch
        self.resource_id = resource_id
        self.resource_type = resource_type

    def __repr__(self):
        return 'Tag(%s=%s)' % (self.key, self.value)

    def startElement(self, name, attrs, connection):
        # Tags carry no nested XML elements of interest.
        pass

    def endElement(self, name, value, connection):
        # Called by the SAX parser with each closed element's text.
        if name == 'PropagateAtLaunch':
            self.propagate_at_launch = (value.lower() == 'true')
        else:
            attr = self._SIMPLE_ELEMENTS.get(name)
            if attr is not None:
                setattr(self, attr, value)

    def build_params(self, params, i):
        """
        Populate *params* with the name/value pairs necessary to identify
        this Tag in a request, using the AWS ``Tags.member.N.*`` scheme.
        """
        prefix = 'Tags.member.%d.' % i
        flag = 'true' if self.propagate_at_launch else 'false'
        for suffix, val in (('ResourceId', self.resource_id),
                            ('ResourceType', self.resource_type),
                            ('Key', self.key),
                            ('Value', self.value),
                            ('PropagateAtLaunch', flag)):
            params[prefix + suffix] = val

    def delete(self):
        # Ask the service to remove this tag.
        return self.connection.delete_tags([self])
| mit |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Xively/Triggers/ReadTrigger.py | 5 | 3479 | # -*- coding: utf-8 -*-
###############################################################################
#
# ReadTrigger
# Returns a specific trigger for the authenticated account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ReadTrigger(Choreography):
    """
    Xively "ReadTrigger" Choreo: returns a specific trigger for the
    authenticated account.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the ReadTrigger Choreo.  *temboo_session*
        must be a TembooSession holding a valid set of Temboo credentials.
        """
        super(ReadTrigger, self).__init__(temboo_session, '/Library/Xively/Triggers/ReadTrigger')

    def new_input_set(self):
        # Fresh, empty input set for this choreo.
        return ReadTriggerInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the choreo-specific result set.
        return ReadTriggerResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle on an asynchronous execution of this choreo.
        return ReadTriggerChoreographyExecution(session, exec_id, path)
class ReadTriggerInputSet(InputSet):
    """
    Inputs for the ReadTrigger Choreo.  Call the setters below to populate
    the input parameters before executing the Choreo.
    """

    def set_APIKey(self, value):
        """
        Required string -- the API Key provided by Xively.
        """
        super(ReadTriggerInputSet, self)._set_input('APIKey', value)

    def set_ResponseFormat(self, value):
        """
        Optional string -- response format: "json" (the default) or "xml".
        """
        super(ReadTriggerInputSet, self)._set_input('ResponseFormat', value)

    def set_TriggerID(self, value):
        """
        Required integer -- ID of the trigger to retrieve information about.
        """
        super(ReadTriggerInputSet, self)._set_input('TriggerID', value)
class ReadTriggerResultSet(ResultSet):
    """
    Results of a ReadTrigger Choreo execution; used to retrieve output
    values once the execution has completed.
    """

    def getJSONFromString(self, str):
        # Deserialize a JSON document held in a string.  (Parameter name
        # 'str' kept for interface compatibility, though it shadows the
        # builtin.)
        return json.loads(str)

    def get_Response(self):
        """
        The "Response" output for this Choreo execution: the raw response
        returned by Xively.
        """
        return self._output.get('Response', None)
class ReadTriggerChoreographyExecution(ChoreographyExecution):
    """
    Handle on an in-flight (or finished) ReadTrigger execution.
    """

    def _make_result_set(self, response, path):
        # Wrap the raw response in the choreo-specific result set.
        return ReadTriggerResultSet(response, path)
| gpl-3.0 |
Blitzen/oauthlib | oauthlib/oauth2/rfc6749/request_validator.py | 33 | 19622 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import logging
log = logging.getLogger(__name__)
class RequestValidator(object):
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: http://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: http://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: http://tools.ietf.org/html/rfc6749#section-6
"""
return True
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
Headers may be accesses through request.headers and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: http://tools.ietf.org/html/rfc1945#section-11.1
"""
raise NotImplementedError('Subclasses must implement this method.')
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
*args, **kwargs):
"""Ensure that the authorization process represented by this authorization
code began with this 'redirect_uri'.
If the client specifies a redirect_uri when obtaining code then that
redirect URI must be bound to the code and verified equal in this
method, according to RFC 6749 section 4.1.3. Do not compare against
the client's allowed redirect URIs, but against the URI used when the
code was saved.
:param client_id: Unicode client identifier
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client
:param refresh_token: Unicode refresh_token
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier
:param code: The authorization code grant (request.code).
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
def rotate_refresh_token(self, request):
"""Determine whether to rotate the refresh token. Default, yes.
When access tokens are refreshed the old refresh token can be kept
or replaced with a new one (rotated). Return True to rotate and
and False for keeping original.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh Token Grant
"""
return True
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (client_id)
- the redirect URI used (request.redirect_uri)
- a resource owner / user (request.user)
- the authorized scopes (request.scopes)
- the client state, if given (code.get('state'))
The 'code' argument is actually a dictionary, containing at least a
'code' key with the actual authorization code:
{'code': 'sdf345jsdf0934f'}
It may also have a 'state' key containing a nonce for the client, if it
chose to send one. That value should be saved and used in
'validate_code'.
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant and, optionally, state.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
- a client and it's client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client
}
Note that while "scope" is a string-separated list of authorized scopes,
the original list is still available in request.scopes
:param client_id: Unicode client identifier
:param token: A Bearer token dict
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: The HTTP Request (oauthlib.common.Request)
A key to OAuth 2 security and restricting impact of leaked tokens is
the short expiration time of tokens, *always ensure the token has not
expired!*.
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new.
Allows for fine grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a restful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for django these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Verify that the authorization_code is valid and assigned to the given
client.
Before returning true, set the following based on the information stored
with the code in 'save_authorization_code':
- request.user
- request.state (if given)
- request.scopes
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes
must also be set.
:param client_id: Unicode client identifier
:param code: Unicode authorization code
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier
:param grant_type: Unicode grant type, i.e. authorization_code, password.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier
:param redirect_uri: Unicode absolute URI
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier
:param response_type: Unicode response type, i.e. code, token.
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier
:param scopes: List of scopes (defined by you)
:param client: Client object set by you, see authenticate_client.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_user(self, username, password, client, request, *args, **kwargs):
    """Check that the username/password combination is valid.

    OBS! The validation should also set the user attribute of the request
    to a valid resource owner, i.e. ``request.user = username`` or similar.
    If not set you will be unable to associate a token with a user in the
    persistance method used (commonly, save_bearer_token).

    :param username: Unicode username
    :param password: Unicode password
    :param client: Client object set by you, see authenticate_client.
    :param request: The HTTP Request (oauthlib.common.Request)
    :rtype: True or False

    Method is used by:
        - Resource Owner Password Credentials Grant
    """
    raise NotImplementedError('Subclasses must implement this method.')
| bsd-3-clause |
evanma92/routeh | flask/lib/python2.7/site-packages/whoosh/filedb/filetables.py | 52 | 25256 | # Copyright 2009 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""This module defines writer and reader classes for a fast, immutable
on-disk key-value database format. The current format is based heavily on
D. J. Bernstein's CDB format (http://cr.yp.to/cdb.html).
"""
import os, struct
from binascii import crc32
from bisect import bisect_left
from hashlib import md5 # @UnresolvedImport
from whoosh.compat import b, bytes_type
from whoosh.compat import xrange
from whoosh.util.numlists import GrowableArray
from whoosh.system import _INT_SIZE, emptybytes
# Exceptions
class FileFormatError(Exception):
    """Raised when a hash file does not begin with the expected magic bytes."""
# Hash functions
def cdb_hash(key):
    """Return D. J. Bernstein's CDB hash of ``key``, masked to 32 bits.

    :param key: the key to hash. Byte strings and text strings are both
        accepted; on Python 3 iterating over ``bytes`` yields integers
        directly, while iterating over ``str`` yields 1-character strings.
    """
    h = 5381
    for c in key:
        # On Python 3, iterating bytes yields ints already; calling ord()
        # on an int would raise TypeError, so only call it for characters.
        n = c if isinstance(c, int) else ord(c)
        h = (h + (h << 5)) & 0xffffffff ^ n
    return h
def md5_hash(key):
    """Return the low 32 bits of the MD5 digest of ``key``."""
    # The last four digest bytes, read big-endian, are exactly
    # int(md5(key).hexdigest(), 16) & 0xffffffff.
    return struct.unpack(">I", md5(key).digest()[-4:])[0]
def crc_hash(key):
    """Return the CRC-32 checksum of ``key`` as an unsigned 32-bit value."""
    # Masking normalizes the (possibly signed) crc32 result to 0..2**32-1.
    return 0xffffffff & crc32(key)
# Available hash implementations, indexed by the "hashtype" byte stored in
# the file header (0 = MD5, 1 = CRC-32, 2 = CDB)
_hash_functions = (md5_hash, crc_hash, cdb_hash)

# Structs

# Two uints before the key/value pair giving the length of the key and value
_lengths = struct.Struct("!ii")

# A pointer in a hash table, giving the hash value and the key position
_pointer = struct.Struct("!Iq")

# A pointer in the hash table directory, giving the position and number of slots
_dir_entry = struct.Struct("!qi")

# The directory holds one fixed-size entry for each of the 256 hash tables
_directory_size = 256 * _dir_entry.size
# Basic hash file
class HashWriter(object):
    """Implements a fast on-disk key-value store. This hash uses a two-level
    hashing scheme, where a key is hashed, the low eight bits of the hash value
    are used to index into one of 256 hash tables. This is basically the CDB
    algorithm, but unlike CDB this object writes all data serially (it doesn't
    seek backwards to overwrite information at the end).

    Also unlike CDB, this format uses 64-bit file pointers, so the file length
    is essentially unlimited. However, each key and value must be less than
    2 GB in length.
    """

    def __init__(self, dbfile, magic=b("HSH3"), hashtype=0):
        """
        :param dbfile: a :class:`~whoosh.filedb.structfile.StructFile` object
            to write to.
        :param magic: the format tag bytes to write at the start of the file.
        :param hashtype: an integer indicating which hashing algorithm to use.
            Possible values are 0 (MD5), 1 (CRC32), or 2 (CDB hash).
        """
        self.dbfile = dbfile
        self.hashtype = hashtype
        self.hashfn = _hash_functions[self.hashtype]
        # A place for subclasses to put extra metadata; pickled to the end of
        # the file by _write_extras()
        self.extras = {}

        self.startoffset = dbfile.tell()
        # Write format tag
        dbfile.write(magic)
        # Write hash type
        dbfile.write_byte(self.hashtype)
        # Unused future expansion bits
        dbfile.write_int(0)
        dbfile.write_int(0)

        # 256 lists of hashed keys and positions
        self.buckets = [[] for _ in xrange(256)]
        # List to remember the positions of the hash tables
        self.directory = []

    def tell(self):
        # Current write position in the underlying file
        return self.dbfile.tell()

    def add(self, key, value):
        """Adds a key/value pair to the file. Note that keys DO NOT need to be
        unique. You can store multiple values under the same key and retrieve
        them using :meth:`HashReader.all`.

        :param key: the key, as bytes.
        :param value: the value, as bytes.
        """
        assert isinstance(key, bytes_type)
        assert isinstance(value, bytes_type)

        dbfile = self.dbfile
        pos = dbfile.tell()
        # On-disk record layout: (keylen, valuelen) header, then the raw bytes
        dbfile.write(_lengths.pack(len(key), len(value)))
        dbfile.write(key)
        dbfile.write(value)

        # Get hash value for the key
        h = self.hashfn(key)
        # Add hash and on-disk position to appropriate bucket
        self.buckets[h & 255].append((h, pos))

    def add_all(self, items):
        """Convenience method to add a sequence of ``(key, value)`` pairs. This
        is the same as calling :meth:`HashWriter.add` on each pair in the
        sequence.
        """
        add = self.add
        for key, value in items:
            add(key, value)

    def _write_hashes(self):
        # Writes 256 hash tables containing pointers to the key/value pairs
        dbfile = self.dbfile
        # Represent an empty slot in the hash table using 0,0 (no key can
        # start at position 0 because of the header)
        null = (0, 0)

        for entries in self.buckets:
            # Start position of this bucket's hash table
            pos = dbfile.tell()
            # Remember the start position and the number of slots; tables are
            # kept half empty (2x slots) to limit linear-probe collisions
            numslots = 2 * len(entries)
            self.directory.append((pos, numslots))

            # Create the empty hash table
            hashtable = [null] * numslots
            # For each (hash value, key position) tuple in the bucket
            for hashval, position in entries:
                # Bitshift and wrap to get the slot for this entry (the low 8
                # bits already selected the bucket, so use the upper bits here)
                slot = (hashval >> 8) % numslots
                # If the slot is taken, keep going until we find an empty slot
                while hashtable[slot] != null:
                    slot = (slot + 1) % numslots
                # Insert the entry into the hashtable
                hashtable[slot] = (hashval, position)

            # Write the hash table for this bucket to disk
            for hashval, position in hashtable:
                dbfile.write(_pointer.pack(hashval, position))

    def _write_directory(self):
        # Writes a directory of pointers to the 256 hash tables
        dbfile = self.dbfile
        for position, numslots in self.directory:
            dbfile.write(_dir_entry.pack(position, numslots))

    def _write_extras(self):
        # Pickle the extras dict; subclasses override this to append their own
        # trailing data (e.g. key-position indexes)
        self.dbfile.write_pickle(self.extras)

    def close(self):
        """Finish the file (hash tables, directory, extras) and close it.

        :returns: the end position of the written data.
        """
        dbfile = self.dbfile

        # Write hash tables
        self._write_hashes()
        # Write directory of pointers to hash tables
        self._write_directory()

        expos = dbfile.tell()
        # Write extra information
        self._write_extras()
        # Write length of pickle so the reader can seek back to the extras
        dbfile.write_int(dbfile.tell() - expos)

        endpos = dbfile.tell()
        dbfile.close()
        return endpos
class HashReader(object):
    """Reader for the fast on-disk key-value files created by
    :class:`HashWriter`.
    """

    def __init__(self, dbfile, length=None, magic=b("HSH3"), startoffset=0):
        """
        :param dbfile: a :class:`~whoosh.filedb.structfile.StructFile` object
            to read from.
        :param length: the length of the file data. This is necessary since the
            hashing information is written at the end of the file.
        :param magic: the format tag bytes to look for at the start of the
            file. If the file's format tag does not match these bytes, the
            object raises a :class:`FileFormatError` exception.
        :param startoffset: the starting point of the file data.
        """
        self.dbfile = dbfile
        self.startoffset = startoffset
        self.is_closed = False

        if length is None:
            # Derive the data length from the physical end of the file
            dbfile.seek(0, os.SEEK_END)
            length = dbfile.tell() - startoffset

        dbfile.seek(startoffset)
        # Check format tag
        filemagic = dbfile.read(4)
        if filemagic != magic:
            raise FileFormatError("Unknown file header %r" % filemagic)
        # Read hash type
        self.hashtype = dbfile.read_byte()
        self.hashfn = _hash_functions[self.hashtype]
        # Skip unused future expansion bits
        dbfile.read_int()
        dbfile.read_int()
        self.startofdata = dbfile.tell()

        # The last int in the file is the pickled extras' length
        exptr = startoffset + length - _INT_SIZE
        # Get the length of extras from the end of the file
        exlen = dbfile.get_int(exptr)
        # Read the extras
        expos = exptr - exlen
        dbfile.seek(expos)
        self._read_extras()

        # Calculate the directory base from the beginning of the extras
        dbfile.seek(expos - _directory_size)
        # Read directory of hash tables
        self.tables = []
        entrysize = _dir_entry.size
        unpackentry = _dir_entry.unpack
        for _ in xrange(256):
            # position, numslots
            self.tables.append(unpackentry(dbfile.read(entrysize)))
        # The position of the first hash table is the end of the key/value pairs
        self.endofdata = self.tables[0][0]

    @classmethod
    def open(cls, storage, name):
        """Convenience method to open a hash file given a
        :class:`whoosh.filedb.filestore.Storage` object and a name. This takes
        care of opening the file and passing its length to the initializer.
        """
        length = storage.file_length(name)
        dbfile = storage.open_file(name)
        return cls(dbfile, length)

    def file(self):
        # Expose the underlying StructFile
        return self.dbfile

    def _read_extras(self):
        try:
            self.extras = self.dbfile.read_pickle()
        except EOFError:
            # No extras were written; treat as empty metadata
            self.extras = {}

    def close(self):
        # Close the underlying file; calling twice is a programming error
        if self.is_closed:
            raise Exception("Tried to close %r twice" % self)
        self.dbfile.close()
        self.is_closed = True

    def key_at(self, pos):
        # Returns the key bytes at the given position
        dbfile = self.dbfile
        keylen = dbfile.get_uint(pos)
        return dbfile.get(pos + _lengths.size, keylen)

    def key_and_range_at(self, pos):
        # Returns a (keybytes, datapos, datalen) tuple for the key at the given
        # position, or None if pos is past the end of the key/value data
        dbfile = self.dbfile
        lenssize = _lengths.size

        if pos >= self.endofdata:
            return None

        keylen, datalen = _lengths.unpack(dbfile.get(pos, lenssize))
        keybytes = dbfile.get(pos + lenssize, keylen)
        datapos = pos + lenssize + keylen
        return keybytes, datapos, datalen

    def _ranges(self, pos=None, eod=None):
        # Yields a series of (keypos, keylength, datapos, datalength) tuples
        # for the key/value pairs in the file
        dbfile = self.dbfile
        pos = pos or self.startofdata
        eod = eod or self.endofdata
        lenssize = _lengths.size
        unpacklens = _lengths.unpack

        # Walk the records sequentially; each record's header tells us where
        # the next one starts
        while pos < eod:
            keylen, datalen = unpacklens(dbfile.get(pos, lenssize))
            keypos = pos + lenssize
            datapos = keypos + keylen
            yield (keypos, keylen, datapos, datalen)
            pos = datapos + datalen

    def __getitem__(self, key):
        # Return the first stored value for the key
        for value in self.all(key):
            return value
        raise KeyError(key)

    def __iter__(self):
        # Yield every (key, value) pair in on-disk order
        dbfile = self.dbfile
        for keypos, keylen, datapos, datalen in self._ranges():
            key = dbfile.get(keypos, keylen)
            value = dbfile.get(datapos, datalen)
            yield (key, value)

    def __contains__(self, key):
        for _ in self.ranges_for_key(key):
            return True
        return False

    def keys(self):
        # Yield every key in on-disk order (duplicates included)
        dbfile = self.dbfile
        for keypos, keylen, _, _ in self._ranges():
            yield dbfile.get(keypos, keylen)

    def values(self):
        # Yield every value in on-disk order
        dbfile = self.dbfile
        for _, _, datapos, datalen in self._ranges():
            yield dbfile.get(datapos, datalen)

    def items(self):
        # Yield every (key, value) pair in on-disk order
        dbfile = self.dbfile
        for keypos, keylen, datapos, datalen in self._ranges():
            yield (dbfile.get(keypos, keylen), dbfile.get(datapos, datalen))

    def get(self, key, default=None):
        # Like __getitem__ but returns ``default`` instead of raising
        for value in self.all(key):
            return value
        return default

    def all(self, key):
        """Yields a sequence of values associated with the given key.
        """
        dbfile = self.dbfile
        for datapos, datalen in self.ranges_for_key(key):
            yield dbfile.get(datapos, datalen)

    def ranges_for_key(self, key):
        """Yields a sequence of ``(datapos, datalength)`` tuples associated
        with the given key.

        :param key: the key to look up, as bytes.
        :raises TypeError: if the key is not a byte string.
        """
        if not isinstance(key, bytes_type):
            raise TypeError("Key %r should be bytes" % key)
        dbfile = self.dbfile

        # Hash the key
        keyhash = self.hashfn(key)
        # Get the position and number of slots for the hash table in which the
        # key may be found (the low 8 bits select one of the 256 tables)
        tablestart, numslots = self.tables[keyhash & 255]
        # If the hash table is empty, we know the key doesn't exists
        if not numslots:
            return

        ptrsize = _pointer.size
        unpackptr = _pointer.unpack
        lenssize = _lengths.size
        unpacklens = _lengths.unpack

        # Calculate where the key's slot should be
        slotpos = tablestart + (((keyhash >> 8) % numslots) * ptrsize)
        # Read slots looking for our key's hash value (linear probing)
        for _ in xrange(numslots):
            slothash, itempos = unpackptr(dbfile.get(slotpos, ptrsize))
            # If this slot is empty, we're done
            if not itempos:
                return

            # If the key hash in this slot matches our key's hash, we might have
            # a match, so read the actual key and see if it's our key
            if slothash == keyhash:
                # Read the key and value lengths
                keylen, datalen = unpacklens(dbfile.get(itempos, lenssize))
                # Only bother reading the actual key if the lengths match
                if keylen == len(key):
                    keystart = itempos + lenssize
                    if key == dbfile.get(keystart, keylen):
                        # The keys match, so yield (datapos, datalen)
                        yield (keystart + keylen, datalen)

            slotpos += ptrsize
            # If we reach the end of the hashtable, wrap around
            if slotpos == tablestart + (numslots * ptrsize):
                slotpos = tablestart

    def range_for_key(self, key):
        # Return only the first (datapos, datalen) range for the key
        for item in self.ranges_for_key(key):
            return item
        raise KeyError(key)
# Ordered hash file
class OrderedHashWriter(HashWriter):
    """Implements an on-disk hash, but requires that keys be added in order.
    An :class:`OrderedHashReader` can then look up "nearest keys" based on
    the ordering.
    """

    def __init__(self, dbfile):
        HashWriter.__init__(self, dbfile)
        # Positions of all keys, in insertion (and therefore key) order
        self.index = GrowableArray("H")
        # The most recently added key, used to enforce the ordering
        self.lastkey = emptybytes

    def add(self, key, value):
        """Add a key/value pair; keys must be strictly increasing."""
        previous = self.lastkey
        if key <= previous:
            raise ValueError("Keys must increase: %r..%r"
                             % (previous, key))
        # Remember where this key's record starts before writing it
        self.index.append(self.dbfile.tell())
        HashWriter.add(self, key, value)
        self.lastkey = key

    def _write_extras(self):
        # Describe the key-position array so the reader can decode it
        self.extras["indextype"] = self.index.typecode
        self.extras["indexlen"] = len(self.index)
        # Write the extras dict itself
        HashWriter._write_extras(self)
        # Append the raw index array after the pickled extras
        self.index.to_file(self.dbfile)
class OrderedHashReader(HashReader):
    """Adds "closest key" lookups to :class:`HashReader` for files written by
    :class:`OrderedHashWriter`, using the key-position index stored after the
    extras."""

    def closest_key(self, key):
        """Returns the closest key equal to or greater than the given key. If
        there is no key in the file equal to or greater than the given key,
        returns None.
        """
        pos = self.closest_key_pos(key)
        if pos is None:
            return None
        return self.key_at(pos)

    def ranges_from(self, key):
        """Yields a series of ``(keypos, keylen, datapos, datalen)`` tuples
        for the ordered series of keys equal or greater than the given key.
        """
        pos = self.closest_key_pos(key)
        if pos is None:
            return

        for item in self._ranges(pos=pos):
            yield item

    def keys_from(self, key):
        """Yields an ordered series of keys equal to or greater than the given
        key.
        """
        dbfile = self.dbfile
        for keypos, keylen, _, _ in self.ranges_from(key):
            yield dbfile.get(keypos, keylen)

    def items_from(self, key):
        """Yields an ordered series of ``(key, value)`` tuples for keys equal
        to or greater than the given key.
        """
        dbfile = self.dbfile
        for keypos, keylen, datapos, datalen in self.ranges_from(key):
            yield (dbfile.get(keypos, keylen), dbfile.get(datapos, datalen))

    def _read_extras(self):
        dbfile = self.dbfile

        # Read the extras
        HashReader._read_extras(self)

        # Set up for reading the index array, which the writer appended
        # directly after the pickled extras
        indextype = self.extras["indextype"]
        self.indexbase = dbfile.tell()
        self.indexlen = self.extras["indexlen"]
        # Byte size of one index entry, derived from the array typecode
        self.indexsize = struct.calcsize(indextype)
        # Set up the function to read values from the index array
        if indextype == "B":
            self._get_pos = dbfile.get_byte
        elif indextype == "H":
            self._get_pos = dbfile.get_ushort
        elif indextype == "i":
            self._get_pos = dbfile.get_int
        elif indextype == "I":
            self._get_pos = dbfile.get_uint
        elif indextype == "q":
            self._get_pos = dbfile.get_long
        else:
            raise Exception("Unknown index type %r" % indextype)

    def closest_key_pos(self, key):
        # Given a key, return the position of that key OR the next highest key
        # if the given key does not exist
        if not isinstance(key, bytes_type):
            raise TypeError("Key %r should be bytes" % key)

        indexbase = self.indexbase
        indexsize = self.indexsize
        key_at = self.key_at
        _get_pos = self._get_pos

        # Do a binary search of the positions in the index array; the keys
        # were written in increasing order, so the positions are sorted by key
        lo = 0
        hi = self.indexlen
        while lo < hi:
            mid = (lo + hi) // 2
            midkey = key_at(_get_pos(indexbase + mid * indexsize))
            if midkey < key:
                lo = mid + 1
            else:
                hi = mid

        # If we went off the end, return None
        if lo == self.indexlen:
            return None
        # Return the closest key
        return _get_pos(indexbase + lo * indexsize)
# Fielded Ordered hash file
class FieldedOrderedHashWriter(HashWriter):
    """Implements an on-disk hash, but writes separate position indexes for
    each field.
    """

    def __init__(self, dbfile):
        HashWriter.__init__(self, dbfile)
        # Maps field names to (startpos, indexpos, length, typecode);
        # stored in the extras so the reader can recover it
        self.fieldmap = self.extras["fieldmap"] = {}
        # The most recently added key, used to enforce ordering
        self.lastkey = emptybytes

    def start_field(self, fieldname):
        """Begin writing the keys for the named field."""
        self.fieldstart = self.dbfile.tell()
        self.fieldname = fieldname
        # Relative positions of this field's keys
        self.poses = GrowableArray("H")
        # Key ordering restarts for every field
        self.lastkey = emptybytes

    def add(self, key, value):
        """Add a key/value pair; keys must be strictly increasing within the
        current field."""
        previous = self.lastkey
        if key <= previous:
            raise ValueError("Keys must increase: %r..%r"
                             % (previous, key))
        # Record the key's offset relative to the start of the field
        self.poses.append(self.dbfile.tell() - self.fieldstart)
        HashWriter.add(self, key, value)
        self.lastkey = key

    def end_field(self):
        """Finish the current field: record its metadata in the fieldmap and
        write its position index to disk."""
        stream = self.dbfile
        positions = self.poses
        self.fieldmap[self.fieldname] = (self.fieldstart, stream.tell(),
                                         len(positions), positions.typecode)
        positions.to_file(stream)
class FieldedOrderedHashReader(HashReader):
    """Reads hashes written by :class:`FieldedOrderedHashWriter`, using the
    per-field key-position arrays to support ordered ("closest term")
    lookups within each field.
    """

    def __init__(self, *args, **kwargs):
        HashReader.__init__(self, *args, **kwargs)
        self.fieldmap = self.extras["fieldmap"]
        # Make a sorted list of the field names with their start and end ranges
        self.fieldlist = []
        for fieldname in sorted(self.fieldmap.keys()):
            startpos, ixpos, ixsize, ixtype = self.fieldmap[fieldname]
            self.fieldlist.append((fieldname, startpos, ixpos))

    def field_start(self, fieldname):
        # File position of the first key/value pair in the field
        return self.fieldmap[fieldname][0]

    def fielded_ranges(self, pos=None, eod=None):
        # Yields (fieldname, keypos, keylen, datapos, datalen) tuples for all
        # pairs, advancing through self.fieldlist whenever the scan crosses a
        # field's end boundary
        flist = self.fieldlist
        fpos = 0
        fieldname, start, end = flist[fpos]
        for keypos, keylen, datapos, datalen in self._ranges(pos, eod):
            if keypos >= end:
                fpos += 1
                fieldname, start, end = flist[fpos]
            yield fieldname, keypos, keylen, datapos, datalen

    def iter_terms(self):
        """Yields ``(fieldname, termbytes)`` for every key in the file."""
        get = self.dbfile.get
        for fieldname, keypos, keylen, _, _ in self.fielded_ranges():
            yield fieldname, get(keypos, keylen)

    def iter_term_items(self):
        """Yields ``(fieldname, termbytes, valuebytes)`` for every pair in
        the file."""
        get = self.dbfile.get
        for item in self.fielded_ranges():
            fieldname, keypos, keylen, datapos, datalen = item
            yield fieldname, get(keypos, keylen), get(datapos, datalen)

    def contains_term(self, fieldname, btext):
        """Returns True if the given term exists in the given field."""
        try:
            self.range_for_term(fieldname, btext)
            return True
        except KeyError:
            return False

    def range_for_term(self, fieldname, btext):
        """Returns the ``(datapos, datalen)`` range for the term in the given
        field, raising KeyError if it is not present.
        """
        # The same key may exist in several fields; pick the occurrence whose
        # data position falls inside this field's region
        start, ixpos, ixsize, code = self.fieldmap[fieldname]
        for datapos, datalen in self.ranges_for_key(btext):
            if start < datapos < ixpos:
                return datapos, datalen
        raise KeyError((fieldname, btext))

    def term_data(self, fieldname, btext):
        """Returns the value bytes stored for the term in the given field."""
        datapos, datalen = self.range_for_term(fieldname, btext)
        return self.dbfile.get(datapos, datalen)

    def term_get(self, fieldname, btext, default=None):
        """Like :meth:`term_data` but returns ``default`` for missing terms."""
        try:
            return self.term_data(fieldname, btext)
        except KeyError:
            return default

    def closest_term_pos(self, fieldname, key):
        # Given a key, return the position of that key OR the next highest key
        # if the given key does not exist
        if not isinstance(key, bytes_type):
            raise TypeError("Key %r should be bytes" % key)

        dbfile = self.dbfile
        key_at = self.key_at
        # fieldmap stores (startpos, indexpos, entry count, array typecode)
        startpos, ixpos, ixlen, ixtype = self.fieldmap[fieldname]
        # Choose the reader function matching the index array's typecode
        if ixtype == "B":
            get_pos = dbfile.get_byte
        elif ixtype == "H":
            get_pos = dbfile.get_ushort
        elif ixtype == "i":
            get_pos = dbfile.get_int
        elif ixtype == "I":
            get_pos = dbfile.get_uint
        elif ixtype == "q":
            get_pos = dbfile.get_long
        else:
            raise Exception("Unknown index type %r" % ixtype)

        # FIX: step through the on-disk index array by the byte size of one
        # entry (struct.calcsize of the typecode), not by the entry count as
        # before -- mirroring OrderedHashReader.closest_key_pos, which reads
        # the equivalent array correctly
        itemsize = struct.calcsize(ixtype)

        # Do a binary search of the positions in the index array
        lo = 0
        hi = ixlen
        while lo < hi:
            mid = (lo + hi) // 2
            midkey = key_at(startpos + get_pos(ixpos + mid * itemsize))
            if midkey < key:
                lo = mid + 1
            else:
                hi = mid

        # If we went off the end, return None
        if lo == ixlen:
            return None
        # Return the file position of the closest key
        return startpos + get_pos(ixpos + lo * itemsize)

    def closest_term(self, fieldname, btext):
        """Returns the closest term in the field equal to or greater than the
        given term, or None if there is no such term."""
        pos = self.closest_term_pos(fieldname, btext)
        if pos is None:
            return None
        return self.key_at(pos)

    def term_ranges_from(self, fieldname, btext):
        """Yields ``(keypos, keylen, datapos, datalen)`` tuples for the terms
        in the field equal to or greater than the given term."""
        pos = self.closest_term_pos(fieldname, btext)
        if pos is None:
            return

        # The field's index position doubles as the end of its key/value data
        startpos, ixpos, ixsize, ixtype = self.fieldmap[fieldname]
        for item in self._ranges(pos, ixpos):
            yield item

    def terms_from(self, fieldname, btext):
        """Yields the terms in the field equal to or greater than the given
        term."""
        dbfile = self.dbfile
        for keypos, keylen, _, _ in self.term_ranges_from(fieldname, btext):
            yield dbfile.get(keypos, keylen)

    def term_items_from(self, fieldname, btext):
        """Yields ``(term, value)`` pairs for terms equal to or greater than
        the given term in the field."""
        dbfile = self.dbfile
        for item in self.term_ranges_from(fieldname, btext):
            keypos, keylen, datapos, datalen = item
            yield (dbfile.get(keypos, keylen), dbfile.get(datapos, datalen))
| bsd-3-clause |
liuming9283/SmartGateway | serial_python/main_entry.py | 1 | 1410 | #!/usr/bin/env python
# coding:utf-8
# 项目的主程序入口,利用python多进程
# created by webber 2017/1/13
import sys
import multiprocessing
import httpclient
import http_multi_threading
import gateway_monitor
import websocket_test
import tcp_wifi
import tcp_control
# Handles of the spawned worker processes; populated by main()
plist = []
def worker_1():
    # Subprocess entry point: run the multi-threaded HTTP client loop.
    print '=========worker---1 start !!!=========='
    # Superseded single-threaded / multi-process variants, kept for reference:
    # httpclient.main()
    # http_multi_process()
    http_multi_threading.main()
def worker_2():
    # Subprocess entry point: run the gateway monitor loop.
    print '=========worker---2 start !!!=========='
    gateway_monitor.main()
def worker_3():
    # Subprocess entry point: run the websocket loop (websocket_test module).
    print '=========worker---3 start !!!=========='
    websocket_test.main()
def worker_4():
    # Subprocess entry point: run the TCP/wifi loop.
    print '=========worker---4 start !!!=========='
    tcp_wifi.main()
def worker_5():
    # Subprocess entry point: run the TCP control loop.
    print '=========worker---5 start !!!=========='
    tcp_control.main()
def main():
    """Spawn one child process per subsystem and wait for all of them.

    Fix: the original loop called ``p.start()`` immediately followed by
    ``p.join()`` for each process in turn, which blocked on every worker
    before starting the next one -- running the subsystems sequentially
    instead of in parallel. Start every process first, then join them all.
    """
    targets = (worker_1,   # httpclient
               worker_2,   # gateway_monitor
               worker_3,   # websocket
               worker_4,   # tcp_wifi
               worker_5)   # tcp_control
    for target in targets:
        plist.append(multiprocessing.Process(target=target, args=()))

    # Start every worker so they all run concurrently...
    for p in plist:
        p.start()
    # ...then block until each one exits.
    for p in plist:
        p.join()
# Script entry point
if __name__ == '__main__':
    main()
| gpl-3.0 |
tun4f1sh/virtuous-kernel-7x30-gingerbread | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary (e.g. a cross toolchain) via $READELF.
readelf = os.getenv("READELF", "readelf")

# Matches "readelf -u" function headers like "<name>: [0xSTART-0xEND]"
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches unwind region descriptor lines carrying "rlen=N"
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
# Running totals for the whole object file
num_funcs = 0
num_errors = 0

# State for the function currently being scanned
func = False
slots = 0
rlen_sum = 0

# Parse the unwind dump produced by "readelf -u" on the input file
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previously scanned function first
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # Each 16-byte IA-64 instruction bundle holds 3 slots
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        # Accumulate the region lengths reported for the current function
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# The last function in the file still needs checking
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
lafayette/JBTT | framework/python/Lib/re.py | 7 | 12841 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]

__version__ = "2.2.1"

# flags -- each has a one-letter alias and a long name; the values come from
# the underlying sre engine so they can be OR'ed together
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Apply the pattern at the start of the string; return a match object,
    or None if the beginning of the string does not match."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan through string looking for the first location where the pattern
    matches; return a match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0):
    """Replace the leftmost non-overlapping occurrences of the pattern in
    string with repl and return the result.  repl may be a string or a
    callable; a callable receives the match object and must return the
    replacement string to be used."""
    compiled = _compile(pattern, 0)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0):
    """Like sub(), but return the 2-tuple (new_string, number).

    new_string is the result of replacing the leftmost non-overlapping
    occurrences of the pattern in the source string with repl; number is
    how many substitutions were made.  repl may be a string or a callable;
    a callable receives the match object and must return the replacement
    string to be used."""
    compiled = _compile(pattern, 0)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0):
    """Split the source string by the occurrences of the pattern and return
    a list of the resulting substrings."""
    compiled = _compile(pattern, 0)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If the pattern has one or more groups the list contains groups instead
    (tuples when there is more than one group).  Empty matches are included
    in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# finditer() is only exported when the interpreter is 2.2+ (hexversion
# 0x02020000), where the underlying pattern objects provide it
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    # Public wrapper around the caching compiler.
    compiled = _compile(pattern, flags)
    return compiled
def purge():
    "Clear the regular expression cache"
    # Flush both the compiled-pattern and replacement-template caches.
    for cache in (_cache, _cache_repl):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    # Same as compile(), but with the template flag forced on.
    return _compile(pattern, T | flags)
# Characters that escape() leaves unmodified.  Stored as a dict for
# fast membership tests (this module predates the set type).
# Fix: the original literal ended in "...01234567890", duplicating the
# trailing '0'; harmless (dict assignment is idempotent) but a typo.
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789':
    _alphanum[c] = 1
del c
def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    alphanum = _alphanum
    result = list(pattern)
    for index in range(len(pattern)):
        char = pattern[index]
        if char in alphanum:
            continue
        if char == "\000":
            # NUL gets an octal escape; a raw backslash-NUL would be
            # rejected by the pattern parser.
            result[index] = "\\000"
        else:
            result[index] = "\\" + char
    # pattern[:0] is an empty string of the same type as the input,
    # so str stays str and unicode stays unicode.
    return pattern[:0].join(result)
# --------------------------------------------------------------------
# internals

# Caches for compiled patterns and parsed replacement templates, keyed
# by the arguments passed to _compile()/_compile_repl().
_cache = {}
_cache_repl = {}

# The concrete type of a compiled pattern object; sre_compile does not
# export it, so it is recovered from a throwaway compilation.
_pattern_type = type(sre_compile.compile("", 0))

# Each cache is flushed outright once it reaches this many entries.
_MAXCACHE = 100
def _compile(*key):
    # internal: compile pattern
    #
    # key is (pattern, flags).  The cache key additionally carries the
    # pattern's type so that str and unicode spellings of the same
    # pattern text do not collide in the cache.
    cachekey = (type(key[0]),) + key
    p = _cache.get(cachekey)
    if p is not None:
        return p
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        # Already compiled: flags would be ignored, so reject them
        # explicitly rather than silently.
        if flags:
            raise ValueError('Cannot process flags argument with a compiled pattern')
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError, "first argument must be string or compiled pattern"
    try:
        p = sre_compile.compile(pattern, flags)
    except error, v:
        raise error, v # invalid expression
    # Crude eviction policy: dump the whole cache when it fills up.
    if len(_cache) >= _MAXCACHE:
        _cache.clear()
    _cache[cachekey] = p
    return p
def _compile_repl(*key):
    # internal: compile replacement pattern
    #
    # key is (repl, pattern).  Parsed replacement templates are cached
    # just like compiled patterns, with the same flush-everything
    # eviction policy.
    p = _cache_repl.get(key)
    if p is not None:
        return p
    repl, pattern = key
    try:
        p = sre_parse.parse_template(repl, pattern)
    except error, v:
        raise error, v # invalid expression
    if len(_cache_repl) >= _MAXCACHE:
        _cache_repl.clear()
    _cache_repl[key] = p
    return p
def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    # Parse the replacement template against the pattern, then expand
    # it with the group values captured by the match.
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    compiled = _compile_repl(template, pattern)
    groups = compiled[0]
    literals = compiled[1]
    if not groups and len(literals) == 1:
        # No group references and a single literal piece: the
        # replacement is just that string.
        return literals[0]
    def filter(match, template=compiled):
        return sre_parse.expand_template(template, match)
    return filter
# register myself for pickling

import copy_reg

def _pickle(p):
    # Pickle a compiled pattern as a call to _compile(pattern, flags),
    # i.e. recompile it from source when it is unpickled.
    return _compile, (p.pattern, p.flags)

copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    """Experimental lexical scanner.

    Takes a lexicon of (phrase, action) pairs; scan() matches the
    phrases, in lexicon order, against successive positions of a
    string and collects the actions' results.
    """
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Wrap each phrase in its own capturing group so that
            # m.lastindex later identifies which lexicon entry matched.
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Scan string, returning (list_of_action_results, unmatched_tail)."""
        result = []
        append = result.append
        # scanner() yields a stateful matcher; each match() call tries
        # the compound pattern at the current position.
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # zero-width match: stop rather than loop forever
                break
            # lastindex maps back to the lexicon entry whose group matched
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, '__call__'):
                # Callable action: expose the match object on self and
                # let the action decide what (if anything) to emit.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        # tokens collected so far, plus whatever trailing text went unmatched
        return result, string[i:]
| mit |
def __boot():
    """Bootstrap the *real* 'site' module (this file shadows it on
    sys.path), then re-insert setuptools-managed path entries at the
    position they previously occupied."""
    import sys, imp, os, os.path
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)

    pic = getattr(sys,'path_importer_cache',{})
    # Everything after the PYTHONPATH entries is the stdlib/site portion.
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)
    #print "searching",stdpath,sys.path

    # Walk the standard path looking for a 'site' module that is not us.
    for item in stdpath:
        if item==mydir or not item:
            continue # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                stream, path, descr = imp.find_module('site',[item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site',stream,path,descr)
            finally:
                stream.close()
            break
    else:
        # for/else: the loop ran to completion without a break
        raise ImportError("Couldn't find the real 'site' module")

    #print "loaded", __file__

    # NOTE(review): makepath and addsitedir are apparently provided by
    # the real 'site' module loaded into this namespace above -- they
    # are not defined in this file; confirm before refactoring.
    known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp

    oldpos = getattr(sys,'__egginsert',0)   # save old insertion position
    sys.__egginsert = 0                     # and reset the current one

    for item in PYTHONPATH:
        addsitedir(item)

    sys.__egginsert += oldpos           # restore effective old position

    d,nd = makepath(stdpath[0])
    insert_at = None
    new_path = []

    for item in sys.path:
        p,np = makepath(item)

        if np==nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)

        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1

    sys.path[:] = new_path
# When this stand-in is imported *as* the 'site' module, run the
# bootstrap, then drop the helper from the module namespace.
if __name__=='site':
    __boot()
    del __boot
| gpl-2.0 |
barry-scott/git-workbench | Source/Common/wb_debug.py | 2 | 2965 | '''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_debug.py
'''
import time
class WbDebugOption:
    """A named debug switch that forwards messages to a logger, but
    only while the switch is enabled."""
    __slots__ = ('__enabled', '_log', '__name', '__fmt')

    def __init__(self, log, name):
        assert log is not None
        self.__enabled = False
        self._log = log
        self.__name = name
        # message prefix template, e.g. "APP %s"
        self.__fmt = '{} %s'.format(name)

    def __repr__(self):
        return '<WbDebugOption: %s enabled=%r>' % (self.__name, self.isEnabled())

    def enable(self, state=True):
        self.__enabled = state

    def isEnabled(self):
        return self.__enabled

    def __bool__(self):
        return self.isEnabled()

    def __call__(self, msg):
        if not self.__enabled:
            return
        self._log.debug(self.__fmt % (msg,))
class WbDebugSpeedOption(WbDebugOption):
    """Debug option that stamps each message with the seconds elapsed
    since the timer started and since the previous event."""
    __slots__ = ('__speed_start_time', '__speed_last_event_time')

    def __init__(self, log, name):
        super().__init__(log, name)
        now = time.time()
        self.__speed_start_time = now
        self.__speed_last_event_time = now

    def __call__(self, msg, start_timer=False):
        if not self.isEnabled():
            return
        now = time.time()
        if start_timer:
            # restart both reference points at this event
            self.__speed_start_time = now
            self.__speed_last_event_time = now
        start_delta = now - self.__speed_start_time
        last_delta = now - self.__speed_last_event_time
        self.__speed_last_event_time = now
        self._log.debug('SPEED %.6f %.6f %s' % (start_delta, last_delta, msg))
class WbDebug:
    """Registry of named debug options, enabled via a comma-separated
    option string such as "app,tree-model"."""

    def __init__(self, log):
        self._log = log
        self.debugLogSpeed = WbDebugSpeedOption(self._log, 'SPEED')
        self.debugLogApp = self.addDebugOption('APP')
        self.debugLogThreading = self.addDebugOption('THREADING')
        self.debugLogMainWindow = self.addDebugOption('MAIN WINDOW')
        self.debugLogTreeModel = self.addDebugOption('TREE MODEL')
        self.debugLogTreeModelNode = self.addDebugOption('TREE MODEL NODE')
        self.debugLogTableModel = self.addDebugOption('TABLE MODEL')
        self.debugLogDiff = self.addDebugOption('DIFF')

    def setDebug(self, str_options):
        # Map each option such as "tree-model" to the attribute name
        # "debugLogTreeModel" and enable it if it exists.
        for option in [s.strip().lower() for s in str_options.split(',')]:
            attr = 'debugLog%s' % (''.join(part.capitalize() for part in option.split('-')),)
            if hasattr(self, attr):
                getattr(self, attr).enable(True)
            else:
                msg = 'Unknown debug option %s - see wb_debug.py for available options' % (option,)
                print(msg)

    def addDebugOption(self, name):
        return WbDebugOption(self._log, name)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/aio/_compute_management_client.py | 1 | 13836 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ComputeManagementClientConfiguration
from .operations import Operations
from .operations import AvailabilitySetsOperations
from .operations import ProximityPlacementGroupsOperations
from .operations import DedicatedHostGroupsOperations
from .operations import DedicatedHostsOperations
from .operations import VirtualMachineExtensionImagesOperations
from .operations import VirtualMachineExtensionsOperations
from .operations import VirtualMachineImagesOperations
from .operations import UsageOperations
from .operations import VirtualMachinesOperations
from .operations import VirtualMachineSizesOperations
from .operations import ImagesOperations
from .operations import VirtualMachineScaleSetsOperations
from .operations import VirtualMachineScaleSetExtensionsOperations
from .operations import VirtualMachineScaleSetRollingUpgradesOperations
from .operations import VirtualMachineScaleSetVMExtensionsOperations
from .operations import VirtualMachineScaleSetVMsOperations
from .operations import LogAnalyticsOperations
from .operations import DisksOperations
from .operations import SnapshotsOperations
from .operations import DiskEncryptionSetsOperations
from .operations import GalleriesOperations
from .operations import GalleryImagesOperations
from .operations import GalleryImageVersionsOperations
from .operations import GalleryApplicationsOperations
from .operations import GalleryApplicationVersionsOperations
from .operations import VirtualMachineRunCommandsOperations
from .. import models
class ComputeManagementClient(object):
    """Compute Client.

    NOTE: this class is generated by AutoRest (see the file header);
    hand edits will be lost on regeneration.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.compute.v2019_07_01.aio.operations.Operations
    :ivar availability_sets: AvailabilitySetsOperations operations
    :vartype availability_sets: azure.mgmt.compute.v2019_07_01.aio.operations.AvailabilitySetsOperations
    :ivar proximity_placement_groups: ProximityPlacementGroupsOperations operations
    :vartype proximity_placement_groups: azure.mgmt.compute.v2019_07_01.aio.operations.ProximityPlacementGroupsOperations
    :ivar dedicated_host_groups: DedicatedHostGroupsOperations operations
    :vartype dedicated_host_groups: azure.mgmt.compute.v2019_07_01.aio.operations.DedicatedHostGroupsOperations
    :ivar dedicated_hosts: DedicatedHostsOperations operations
    :vartype dedicated_hosts: azure.mgmt.compute.v2019_07_01.aio.operations.DedicatedHostsOperations
    :ivar virtual_machine_extension_images: VirtualMachineExtensionImagesOperations operations
    :vartype virtual_machine_extension_images: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineExtensionImagesOperations
    :ivar virtual_machine_extensions: VirtualMachineExtensionsOperations operations
    :vartype virtual_machine_extensions: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineExtensionsOperations
    :ivar virtual_machine_images: VirtualMachineImagesOperations operations
    :vartype virtual_machine_images: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineImagesOperations
    :ivar usage: UsageOperations operations
    :vartype usage: azure.mgmt.compute.v2019_07_01.aio.operations.UsageOperations
    :ivar virtual_machines: VirtualMachinesOperations operations
    :vartype virtual_machines: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachinesOperations
    :ivar virtual_machine_sizes: VirtualMachineSizesOperations operations
    :vartype virtual_machine_sizes: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineSizesOperations
    :ivar images: ImagesOperations operations
    :vartype images: azure.mgmt.compute.v2019_07_01.aio.operations.ImagesOperations
    :ivar virtual_machine_scale_sets: VirtualMachineScaleSetsOperations operations
    :vartype virtual_machine_scale_sets: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetsOperations
    :ivar virtual_machine_scale_set_extensions: VirtualMachineScaleSetExtensionsOperations operations
    :vartype virtual_machine_scale_set_extensions: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetExtensionsOperations
    :ivar virtual_machine_scale_set_rolling_upgrades: VirtualMachineScaleSetRollingUpgradesOperations operations
    :vartype virtual_machine_scale_set_rolling_upgrades: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetRollingUpgradesOperations
    :ivar virtual_machine_scale_set_vm_extensions: VirtualMachineScaleSetVMExtensionsOperations operations
    :vartype virtual_machine_scale_set_vm_extensions: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetVMExtensionsOperations
    :ivar virtual_machine_scale_set_vms: VirtualMachineScaleSetVMsOperations operations
    :vartype virtual_machine_scale_set_vms: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineScaleSetVMsOperations
    :ivar log_analytics: LogAnalyticsOperations operations
    :vartype log_analytics: azure.mgmt.compute.v2019_07_01.aio.operations.LogAnalyticsOperations
    :ivar disks: DisksOperations operations
    :vartype disks: azure.mgmt.compute.v2019_07_01.aio.operations.DisksOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.compute.v2019_07_01.aio.operations.SnapshotsOperations
    :ivar disk_encryption_sets: DiskEncryptionSetsOperations operations
    :vartype disk_encryption_sets: azure.mgmt.compute.v2019_07_01.aio.operations.DiskEncryptionSetsOperations
    :ivar galleries: GalleriesOperations operations
    :vartype galleries: azure.mgmt.compute.v2019_07_01.aio.operations.GalleriesOperations
    :ivar gallery_images: GalleryImagesOperations operations
    :vartype gallery_images: azure.mgmt.compute.v2019_07_01.aio.operations.GalleryImagesOperations
    :ivar gallery_image_versions: GalleryImageVersionsOperations operations
    :vartype gallery_image_versions: azure.mgmt.compute.v2019_07_01.aio.operations.GalleryImageVersionsOperations
    :ivar gallery_applications: GalleryApplicationsOperations operations
    :vartype gallery_applications: azure.mgmt.compute.v2019_07_01.aio.operations.GalleryApplicationsOperations
    :ivar gallery_application_versions: GalleryApplicationVersionsOperations operations
    :vartype gallery_application_versions: azure.mgmt.compute.v2019_07_01.aio.operations.GalleryApplicationVersionsOperations
    :ivar virtual_machine_run_commands: VirtualMachineRunCommandsOperations operations
    :vartype virtual_machine_run_commands: azure.mgmt.compute.v2019_07_01.aio.operations.VirtualMachineRunCommandsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = ComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers from every model class the package exports.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operations class per operation group; all of them share the
        # client's pipeline, configuration and (de)serializers.
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.availability_sets = AvailabilitySetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.proximity_placement_groups = ProximityPlacementGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.dedicated_host_groups = DedicatedHostGroupsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.dedicated_hosts = DedicatedHostsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_extension_images = VirtualMachineExtensionImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_extensions = VirtualMachineExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_images = VirtualMachineImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.usage = UsageOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machines = VirtualMachinesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_sizes = VirtualMachineSizesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.images = ImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_sets = VirtualMachineScaleSetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_extensions = VirtualMachineScaleSetExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_rolling_upgrades = VirtualMachineScaleSetRollingUpgradesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_vm_extensions = VirtualMachineScaleSetVMExtensionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_scale_set_vms = VirtualMachineScaleSetVMsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.log_analytics = LogAnalyticsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.disks = DisksOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.snapshots = SnapshotsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.disk_encryption_sets = DiskEncryptionSetsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.galleries = GalleriesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.gallery_images = GalleryImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.gallery_image_versions = GalleryImageVersionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.gallery_applications = GalleryApplicationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.gallery_application_versions = GalleryApplicationVersionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.virtual_machine_run_commands = VirtualMachineRunCommandsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # The subscription id is baked into every request URL.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ComputeManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| mit |
sivakuna-aap/superdesk-core | superdesk/eve_backend.py | 2 | 8327 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from flask import current_app as app
from eve.utils import document_etag, config, ParsedRequest
from eve.io.mongo import MongoJSONEncoder
from superdesk.utc import utcnow
from superdesk.logging import logger, item_msg
from eve.methods.common import resolve_document_etag
from elasticsearch.exceptions import RequestError
class EveBackend():
    """Wrapper over Eve's storage layers that keeps the mongo backend
    and the (optional) elastic search backend consistent with each
    other on reads and writes."""

    def find_one(self, endpoint_name, req, **lookup):
        """Fetch a single document, preferring mongo.

        When a search backend is configured, the two stores are
        reconciled: a document found only in elastic is still returned
        (with a warning), and a document found only in mongo is
        re-indexed into elastic on a best-effort basis.
        """
        backend = self._backend(endpoint_name)
        item = backend.find_one(endpoint_name, req=req, **lookup)
        search_backend = self._lookup_backend(endpoint_name, fallback=True)
        if search_backend:
            item_search = search_backend.find_one(endpoint_name, req=req, **lookup)
            if item is None and item_search:
                item = item_search
                logger.warn(item_msg('item is only in elastic', item))
            elif item_search is None and item:
                logger.warn(item_msg('item is only in mongo', item))
                try:
                    logger.info(item_msg('trying to add item to elastic', item))
                    search_backend.insert(endpoint_name, [item])
                except RequestError as e:
                    logger.error(item_msg('failed to add item into elastic error={}'.format(str(e)), item))
        return item

    def find(self, endpoint_name, where, max_results=0):
        """Find items for given endpoint using mongo query in python dict object.

        It handles request creation here so no need to do this in service.

        :param string endpoint_name
        :param dict where
        :param int max_results
        """
        req = ParsedRequest()
        req.where = MongoJSONEncoder().encode(where)
        req.max_results = max_results
        return self.get_from_mongo(endpoint_name, req, None)

    def get(self, endpoint_name, req, lookup):
        """Fetch items via the search backend (falling back to mongo).

        If the filtered query matches nothing the original cursor is
        returned, which allows a 304 Not Modified response; otherwise
        the query is re-run without the If-Modified-Since filter.
        """
        backend = self._lookup_backend(endpoint_name, fallback=True)
        cursor = backend.find(endpoint_name, req, lookup)
        if not cursor.count():
            return cursor  # return 304 if not modified
        else:
            # but fetch without filter if there is a change
            req.if_modified_since = None
            return backend.find(endpoint_name, req, lookup)

    def get_from_mongo(self, endpoint_name, req, lookup):
        """Fetch items directly from mongo, bypassing the search
        backend and any If-Modified-Since filtering."""
        req.if_modified_since = None
        backend = self._backend(endpoint_name)
        return backend.find(endpoint_name, req, lookup)

    def find_and_modify(self, endpoint_name, **kwargs):
        """Run pymongo's atomic find_and_modify on the collection."""
        backend = self._backend(endpoint_name)
        return backend.driver.db[endpoint_name].find_and_modify(**kwargs)

    def create(self, endpoint_name, docs, **kwargs):
        """Insert documents into given collection.

        :param endpoint_name: api resource name
        :param docs: list of docs to be inserted
        """
        ids = self.create_in_mongo(endpoint_name, docs, **kwargs)
        self.create_in_search(endpoint_name, docs, **kwargs)
        return ids

    def create_in_mongo(self, endpoint_name, docs, **kwargs):
        """Insert docs into mongo, filling in etag and default dates
        for any doc that lacks them."""
        for doc in docs:
            doc.setdefault(config.ETAG, document_etag(doc))
            self.set_default_dates(doc)
        backend = self._backend(endpoint_name)
        ids = backend.insert(endpoint_name, docs)
        return ids

    def create_in_search(self, endpoint_name, docs, **kwargs):
        """Index docs in the search backend, if one is configured."""
        search_backend = self._lookup_backend(endpoint_name)
        if search_backend:
            search_backend.insert(endpoint_name, docs, **kwargs)

    def update(self, endpoint_name, id, updates, original):
        """Update document with given id.

        :param endpoint_name: api resource name
        :param id: document id
        :param updates: changes made to document
        :param original: original document
        """
        # change etag on update so following request will refetch it
        updates.setdefault(config.LAST_UPDATED, utcnow())
        if config.ETAG not in updates:
            updated = original.copy()
            updated.update(updates)
            resolve_document_etag(updated, endpoint_name)
            updates[config.ETAG] = updated[config.ETAG]
        return self.system_update(endpoint_name, id, updates, original)

    def system_update(self, endpoint_name, id, updates, original):
        """Only update what is provided, without affecting etag/last_updated.

        This is useful when you want to make some changes without affecting users.

        :param endpoint_name: api resource name
        :param id: document id
        :param updates: changes made to document
        :param original: original document
        """
        backend = self._backend(endpoint_name)
        res = backend.update(endpoint_name, id, updates, original)
        search_backend = self._lookup_backend(endpoint_name)
        if search_backend is not None:
            # re-read the full doc from mongo so elastic gets all fields
            doc = backend.find_one(endpoint_name, req=None, _id=id)
            search_backend.update(endpoint_name, id, doc)
        return res if res is not None else updates

    def replace(self, endpoint_name, id, document, original):
        """Replace the document in both mongo and the search backend."""
        res = self.replace_in_mongo(endpoint_name, id, document, original)
        self.replace_in_search(endpoint_name, id, document, original)
        return res

    def replace_in_mongo(self, endpoint_name, id, document, original):
        backend = self._backend(endpoint_name)
        res = backend.replace(endpoint_name, id, document, original)
        return res

    def replace_in_search(self, endpoint_name, id, document, original):
        search_backend = self._lookup_backend(endpoint_name)
        if search_backend is not None:
            search_backend.replace(endpoint_name, id, document)

    def delete(self, endpoint_name, lookup):
        """
        Delete method to delete by using mongo query syntax.

        :param endpoint_name: Name of the endpoint
        :param lookup: User mongo query syntax. example 1. {'_id':123}, 2. {'item_id': {'$in': [123, 234]}}
        :returns:
            Returns the mongo remove command response. {'n': 12, 'ok': 1}
        """
        backend = self._backend(endpoint_name)
        search_backend = self._lookup_backend(endpoint_name)
        # resolve the lookup to concrete ids first so mongo and elastic
        # delete exactly the same set of documents
        docs = self.get_from_mongo(endpoint_name, lookup=lookup, req=ParsedRequest())
        ids = [doc[config.ID_FIELD] for doc in docs]
        res = backend.remove(endpoint_name, {config.ID_FIELD: {'$in': ids}})
        if res and res.get('n', 0) > 0 and search_backend is not None:
            self._remove_documents_from_search_backend(endpoint_name, ids)
        if res and res.get('n', 0) == 0:
            logger.warn("No documents for {} resource were deleted using lookup {}".format(endpoint_name, lookup))
        return res

    def _remove_documents_from_search_backend(self, endpoint_name, ids):
        """
        Remove documents from the search backend, in batches.

        :param endpoint_name: name of the endpoint
        :param ids: list of ids
        """
        ids = [str(doc_id) for doc_id in ids]
        batch_size = 500
        logger.info("total documents to be removed {}".format(len(ids)))
        for i in range(0, len(ids), batch_size):
            batch = ids[i:i + batch_size]
            query = {'query': {'terms': {'{}._id'.format(endpoint_name): batch}}}
            app.data._search_backend(endpoint_name).remove(endpoint_name, query)
            logger.info("Removed {} documents from {}.".format(len(batch), endpoint_name))

    def _datasource(self, endpoint_name):
        # resolve the endpoint's datasource name via the eve app
        return app.data._datasource(endpoint_name)[0]

    def _backend(self, endpoint_name):
        # the mongo backend for this endpoint
        return app.data._backend(endpoint_name)

    def _lookup_backend(self, endpoint_name, fallback=False):
        # the search backend if configured, else optionally mongo
        backend = app.data._search_backend(endpoint_name)
        if backend is None and fallback:
            backend = app.data._backend(endpoint_name)
        return backend

    def set_default_dates(self, doc):
        """Fill in creation/last-updated timestamps (both now) if missing."""
        now = utcnow()
        doc.setdefault(config.DATE_CREATED, now)
        doc.setdefault(config.LAST_UPDATED, now)
| agpl-3.0 |
kou/zulip | zerver/lib/export.py | 2 | 69765 | # This is the main code for the `./manage.py export` data export tool.
# User docs: https://zulip.readthedocs.io/en/latest/production/export-and-import.html
#
# Most developers will interact with this primarily when they add a
# new table to the schema, in which case they likely need to (1) add
# it the lists in `ALL_ZULIP_TABLES` and similar data structures and
# (2) if it doesn't belong in EXCLUDED_TABLES, add a Config object for
# it to get_realm_config.
import datetime
import glob
import logging
import os
import shutil
import subprocess
import tempfile
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import orjson
from boto3.resources.base import ServiceResource
from django.apps import apps
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.timezone import is_naive as timezone_is_naive
from django.utils.timezone import make_aware as timezone_make_aware
import zerver.lib.upload
from analytics.models import RealmCount, StreamCount, UserCount
from scripts.lib.zulip_tools import overwrite_symlink
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.pysa import mark_sanitized
from zerver.lib.upload import get_bucket
from zerver.models import (
AlertWord,
Attachment,
BotConfigData,
BotStorageData,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Huddle,
Message,
MutedTopic,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
Service,
Stream,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
get_display_recipient,
get_system_bot,
get_user_profile_by_id,
)
# Custom mypy types follow:
Record = Dict[str, Any]
TableName = str
TableData = Dict[TableName, List[Record]]
Field = str
# Filesystem path strings (export shard files, output directories).
Path = str
Context = Dict[str, Any]
FilterArgs = Dict[str, Any]
IdSource = Tuple[TableName, Field]
SourceFilter = Callable[[Record], bool]
# These next two types are callbacks, which mypy does not
# support well, because PEP 484 says "using callbacks
# with keyword arguments is not perceived as a common use case."
# CustomFetch = Callable[[TableData, Config, Context], None]
# PostProcessData = Callable[[TableData, Config, Context], None]
CustomFetch = Any  # TODO: make more specific, see above
PostProcessData = Any  # TODO: make more specific
# The keys of our MessageOutput variables are normally
# List[Record], but when we write partials, we can get
# lists of integers or a single integer.
# TODO: This could maybe be improved using TypedDict?
MessageOutput = Dict[str, Union[List[Record], List[int], int]]
# How many Message rows go into each shard file when paginating the
# message export (see export_partial_message_files).
MESSAGE_BATCH_CHUNK_SIZE = 1000
# Every database table on the server.  sanity_check_output() asserts
# that this set exactly matches the tables Django reports for the
# installed apps, so a new table cannot be silently omitted from the
# export system.
ALL_ZULIP_TABLES = {
    'analytics_fillstate',
    'analytics_installationcount',
    'analytics_realmcount',
    'analytics_streamcount',
    'analytics_usercount',
    'otp_static_staticdevice',
    'otp_static_statictoken',
    'otp_totp_totpdevice',
    'social_auth_association',
    'social_auth_code',
    'social_auth_nonce',
    'social_auth_partial',
    'social_auth_usersocialauth',
    'two_factor_phonedevice',
    'zerver_alertword',
    'zerver_archivedattachment',
    'zerver_archivedattachment_messages',
    'zerver_archivedmessage',
    'zerver_archivedusermessage',
    'zerver_attachment',
    'zerver_attachment_messages',
    'zerver_archivedreaction',
    'zerver_archivedsubmessage',
    'zerver_archivetransaction',
    'zerver_botconfigdata',
    'zerver_botstoragedata',
    'zerver_client',
    'zerver_customprofilefield',
    'zerver_customprofilefieldvalue',
    'zerver_defaultstream',
    'zerver_defaultstreamgroup',
    'zerver_defaultstreamgroup_streams',
    'zerver_draft',
    'zerver_emailchangestatus',
    'zerver_huddle',
    'zerver_message',
    'zerver_missedmessageemailaddress',
    'zerver_multiuseinvite',
    'zerver_multiuseinvite_streams',
    'zerver_preregistrationuser',
    'zerver_preregistrationuser_streams',
    'zerver_pushdevicetoken',
    'zerver_reaction',
    'zerver_realm',
    'zerver_realmauditlog',
    'zerver_realmdomain',
    'zerver_realmemoji',
    'zerver_realmfilter',
    'zerver_recipient',
    'zerver_scheduledemail',
    'zerver_scheduledemail_users',
    'zerver_scheduledmessage',
    'zerver_service',
    'zerver_stream',
    'zerver_submessage',
    'zerver_subscription',
    'zerver_useractivity',
    'zerver_useractivityinterval',
    'zerver_usergroup',
    'zerver_usergroupmembership',
    'zerver_userhotspot',
    'zerver_usermessage',
    'zerver_userpresence',
    'zerver_userprofile',
    'zerver_userprofile_groups',
    'zerver_userprofile_user_permissions',
    'zerver_userstatus',
    'zerver_mutedtopic',
}
# This set contains those database tables that we expect to not be
# included in the export.  This tool does validation to ensure that
# every table in the database is either exported or listed here, to
# ensure we never accidentally fail to export a table.
NON_EXPORTED_TABLES = {
    # These invitation/confirmation flow tables don't make sense to
    # export, since invitations links will be broken by the server URL
    # change anyway:
    'zerver_emailchangestatus',
    'zerver_multiuseinvite',
    'zerver_multiuseinvite_streams',
    'zerver_preregistrationuser',
    'zerver_preregistrationuser_streams',
    # Missed message addresses are low value to export since
    # missed-message email addresses include the server's hostname and
    # expire after a few days.
    'zerver_missedmessageemailaddress',
    # When switching servers, clients will need to re-log in and
    # reregister for push notifications anyway.
    'zerver_pushdevicetoken',
    # We don't use these generated Django tables
    'zerver_userprofile_groups',
    'zerver_userprofile_user_permissions',
    # These are used for scheduling future activity; it could make
    # sense to export, but is relatively low value.
    'zerver_scheduledemail',
    'zerver_scheduledemail_users',
    'zerver_scheduledmessage',
    # These tables are related to a user's 2FA authentication
    # configuration, which will need to be re-setup on the new server.
    'two_factor_phonedevice',
    'otp_static_staticdevice',
    'otp_static_statictoken',
    'otp_totp_totpdevice',
    # These archive tables should not be exported (they are to support
    # restoring content accidentally deleted due to software bugs in
    # the retention policy feature)
    'zerver_archivedmessage',
    'zerver_archivedusermessage',
    'zerver_archivedattachment',
    'zerver_archivedattachment_messages',
    'zerver_archivedreaction',
    'zerver_archivedsubmessage',
    'zerver_archivetransaction',
    # Social auth tables are not needed post-export, since we don't
    # use any of this state outside of a direct authentication flow.
    'social_auth_association',
    'social_auth_code',
    'social_auth_nonce',
    'social_auth_partial',
    'social_auth_usersocialauth',
    # We will likely never want to migrate this table, since it's a
    # total of all the realmcount values on the server.  Might need to
    # recompute it after a fillstate import.
    'analytics_installationcount',
    # Fillstate will require some cleverness to do the right partial export.
    'analytics_fillstate',
    # These are for unfinished features; we'll want to add them to the
    # export before they reach full production status.
    'zerver_defaultstreamgroup',
    'zerver_defaultstreamgroup_streams',
    'zerver_submessage',
    # This is low priority, since users can easily just reset themselves to away.
    'zerver_userstatus',
    # Drafts don't need to be exported as they are supposed to be more ephemeral.
    'zerver_draft',
    # For any tables listed below here, it's a bug that they are not present in the export.
}
IMPLICIT_TABLES = {
    # ManyToMany relationships are exported implicitly when importing
    # the parent table.
    'zerver_attachment_messages',
}
# Attachments are exported outside the Config-driven flow; see
# fetch_attachment_data, which also prunes rows for unexported messages.
ATTACHMENT_TABLES = {
    'zerver_attachment',
}
MESSAGE_TABLES = {
    # message tables get special treatment, because they're by far our
    # largest tables and need to be paginated.
    'zerver_message',
    'zerver_usermessage',
    # zerver_reaction belongs here, since it's added late because it
    # has a foreign key into the Message table.
    'zerver_reaction',
}
# These get their own file as analytics data can be quite large and
# would otherwise make realm.json unpleasant to manually inspect
ANALYTICS_TABLES = {
    'analytics_realmcount',
    'analytics_streamcount',
    'analytics_usercount',
}
# This data structure lists all the Django DateTimeField fields in the
# data model.  These are converted to floats during the export process
# via floatify_datetime_fields, and back during the import process.
#
# TODO: This data structure could likely eventually be replaced by
# inspecting the corresponding Django models
DATE_FIELDS: Dict[TableName, List[Field]] = {
    'zerver_attachment': ['create_time'],
    'zerver_message': ['last_edit_time', 'date_sent'],
    'zerver_mutedtopic': ['date_muted'],
    'zerver_realm': ['date_created'],
    'zerver_stream': ['date_created'],
    'zerver_useractivity': ['last_visit'],
    'zerver_useractivityinterval': ['start', 'end'],
    'zerver_userpresence': ['timestamp'],
    'zerver_userprofile': ['date_joined', 'last_login', 'last_reminder'],
    'zerver_userprofile_mirrordummy': ['date_joined', 'last_login', 'last_reminder'],
    'zerver_realmauditlog': ['event_time'],
    'zerver_userhotspot': ['timestamp'],
    'analytics_installationcount': ['end_time'],
    'analytics_realmcount': ['end_time'],
    'analytics_usercount': ['end_time'],
    'analytics_streamcount': ['end_time'],
}
# BitHandler-valued columns; these are flattened into plain lists during
# the export via listify_bithandler_fields.
BITHANDLER_FIELDS: Dict[TableName, List[Field]] = {
    'zerver_realm': ['authentication_methods'],
}
def sanity_check_output(data: TableData) -> None:
    """Validate the export's table coverage.

    Asserts that our declared table sets agree with the Django models
    actually installed, then logs a warning for every table that should
    have been exported but is missing from `data`.
    """
    # First, we verify that the export tool has a declared
    # configuration for every table declared in the `models.py` files.
    app_labels = [
        'analytics',
        'django_otp',
        'otp_static',
        'otp_totp',
        'social_django',
        'two_factor',
        'zerver',
    ]
    target_models: List[Any] = []
    for app_label in app_labels:
        target_models.extend(apps.get_app_config(app_label).get_models(include_auto_created=True))
    all_tables_db = {model._meta.db_table for model in target_models}
    # These assertion statements will fire when we add a new database
    # table that is not included in Zulip's data exports.  Generally,
    # you can add your new table to `ALL_ZULIP_TABLES` and
    # `NON_EXPORTED_TABLES` during early work on a new feature so that
    # CI passes.
    #
    # We'll want to make sure we handle it for exports before
    # releasing the new feature, but doing so correctly requires some
    # expertise on this export system.
    assert ALL_ZULIP_TABLES == all_tables_db
    for declared_subset in (NON_EXPORTED_TABLES, IMPLICIT_TABLES,
                            ATTACHMENT_TABLES, ANALYTICS_TABLES):
        assert declared_subset.issubset(ALL_ZULIP_TABLES)
    expected_tables = (
        set(ALL_ZULIP_TABLES)
        - NON_EXPORTED_TABLES
        - IMPLICIT_TABLES
        - MESSAGE_TABLES
        - ATTACHMENT_TABLES
        - ANALYTICS_TABLES
    )
    for table in expected_tables:
        if table not in data:
            logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!', table)
def write_data_to_file(output_file: Path, data: Any) -> None:
    """Serialize `data` as pretty-printed JSON into `output_file`."""
    # Because we don't pass a default handler, OPT_PASSTHROUGH_DATETIME
    # makes orjson raise TypeError on any datetime object.  That is
    # deliberate: it verifies that datetimes were already converted to
    # UNIX timestamps (not ISO 8601 strings, which we avoid for
    # historical reasons) before serialization.
    serializer_options = orjson.OPT_INDENT_2 | orjson.OPT_PASSTHROUGH_DATETIME
    with open(output_file, "wb") as f:
        f.write(orjson.dumps(data, option=serializer_options))
def make_raw(query: Any, exclude: Optional[List[Field]]=None) -> List[Record]:
    '''
    Takes a Django query and returns a JSONable list
    of dictionaries corresponding to the database rows.
    '''
    records: List[Record] = []
    for instance in query:
        record = model_to_dict(instance, exclude=exclude)
        # In Django 1.11.5, model_to_dict evaluates the QuerySet of a
        # many-to-many field, giving us a list of model instances.  We
        # need a list of primary keys instead, so pull the ids out of
        # those instances here.
        for m2m_field in instance._meta.many_to_many:
            related_instances = record[m2m_field.name]
            record[m2m_field.name] = [related.id for related in related_instances]
        records.append(record)
    return records
def floatify_datetime_fields(data: TableData, table: TableName) -> None:
    """Convert the datetime columns of `table` (per DATE_FIELDS) into
    UNIX timestamps (floats), in place.

    None values are left untouched.  Naive datetimes indicate a data
    problem; they are logged and localized via Django's make_aware
    before conversion.
    """
    for item in data[table]:
        for field in DATE_FIELDS[table]:
            orig_dt = item[field]
            if orig_dt is None:
                continue
            if timezone_is_naive(orig_dt):
                # Fixed: the message previously lacked a %s placeholder,
                # so passing `item` as a lazy-format argument produced a
                # logging formatting error instead of the warning.
                logging.warning("Naive datetime: %s", item)
                dt = timezone_make_aware(orig_dt)
            else:
                dt = orig_dt
            # Convert to a naive UTC datetime, then to seconds since epoch.
            utc_naive = dt.replace(tzinfo=None) - dt.utcoffset()
            item[field] = (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
def listify_bithandler_fields(data: TableData, table: TableName) -> None:
    """Coerce BitHandler-valued columns of `table` (per BITHANDLER_FIELDS)
    into plain lists, in place, so they can be serialized."""
    for field in BITHANDLER_FIELDS[table]:
        for record in data[table]:
            record[field] = list(record[field])
class Config:
    '''A Config object configures a single table for exporting (and, maybe
    some day importing as well.  This configuration defines what
    process needs to be followed to correctly extract the set of
    objects to export.

    You should never mutate Config objects as part of the export;
    instead use the data to determine how you populate other
    data structures.

    There are parent/children relationships between Config objects.
    The parent should be instantiated first.  The child will
    append itself to the parent's list of children.

    Exactly one fetch strategy should be configured: is_seeded,
    custom_fetch, concat_and_destroy, use_all, normal_parent
    (+ parent_key), or id_source (+ virtual_parent); see
    export_from_config for how each is interpreted.
    '''

    def __init__(self, table: Optional[str]=None,
                 model: Optional[Any]=None,
                 normal_parent: Optional['Config']=None,
                 virtual_parent: Optional['Config']=None,
                 filter_args: Optional[FilterArgs]=None,
                 custom_fetch: Optional[CustomFetch]=None,
                 custom_tables: Optional[List[TableName]]=None,
                 concat_and_destroy: Optional[List[TableName]]=None,
                 id_source: Optional[IdSource]=None,
                 source_filter: Optional[SourceFilter]=None,
                 parent_key: Optional[Field]=None,
                 use_all: bool=False,
                 is_seeded: bool=False,
                 exclude: Optional[List[Field]]=None) -> None:
        # Every config must name at least one table it populates.
        assert table or custom_tables
        self.table = table
        self.model = model
        self.normal_parent = normal_parent
        self.virtual_parent = virtual_parent
        self.filter_args = filter_args
        self.parent_key = parent_key
        self.use_all = use_all
        self.is_seeded = is_seeded
        self.exclude = exclude
        self.custom_fetch = custom_fetch
        self.custom_tables = custom_tables
        self.concat_and_destroy = concat_and_destroy
        self.id_source = id_source
        self.source_filter = source_filter
        self.children: List[Config] = []
        # Only a normal_parent acts as a data source for this config; a
        # virtual_parent merely controls export ordering.
        self.parent: Optional[Config] = normal_parent
        if virtual_parent is not None and normal_parent is not None:
            raise AssertionError('''
                If you specify a normal_parent, please
                do not create a virtual_parent.
                ''')
        if normal_parent is not None:
            normal_parent.children.append(self)
        elif virtual_parent is not None:
            virtual_parent.children.append(self)
        elif not is_seeded:
            # Bug fix: this condition was previously `is_seeded is None`,
            # which could never be true (is_seeded is a bool defaulting
            # to False), so a parentless, unseeded Config was silently
            # accepted instead of raising as the message promises.
            raise AssertionError('''
                You must specify a parent if you are
                not using is_seeded.
                ''')
        if self.id_source is not None:
            if self.virtual_parent is None:
                raise AssertionError('''
                    You must specify a virtual_parent if you are
                    using id_source.''')
            if self.id_source[0] != self.virtual_parent.table:
                raise AssertionError(f'''
                    Configuration error.  To populate {self.table}, you
                    want data from {self.id_source[0]}, but that differs from
                    the table name of your virtual parent ({self.virtual_parent.table}),
                    which suggests you may not have set up
                    the ordering correctly.  You may simply
                    need to assign a virtual_parent, or there
                    may be deeper issues going on.''')
def export_from_config(response: TableData, config: Config, seed_object: Optional[Any]=None,
                       context: Optional[Context]=None) -> None:
    """Export the table(s) described by `config` into `response`.

    Exactly one of the config's fetch strategies is used, checked in this
    priority order: is_seeded, custom_fetch, concat_and_destroy, use_all,
    normal_parent, id_source.  After fetching, datetime and BitHandler
    fields are post-processed, and all child configs are exported
    recursively, in registration order.
    """
    table = config.table
    parent = config.parent
    model = config.model
    if context is None:
        context = {}
    if config.custom_tables:
        exported_tables = config.custom_tables
    else:
        assert table is not None, '''
            You must specify config.custom_tables if you
            are not specifying config.table'''
        exported_tables = [table]
    for t in exported_tables:
        logging.info('Exporting via export_from_config: %s', t)
    rows = None
    if config.is_seeded:
        # The seed object (e.g. the Realm itself) is supplied by the caller.
        rows = [seed_object]
    elif config.custom_fetch:
        # Custom fetchers write directly into `response`; verify they
        # populated every table they promised.
        config.custom_fetch(
            response=response,
            config=config,
            context=context,
        )
        if config.custom_tables:
            for t in config.custom_tables:
                if t not in response:
                    raise AssertionError(f'Custom fetch failed to populate {t}')
    elif config.concat_and_destroy:
        # When we concat_and_destroy, we are working with
        # temporary "tables" that are lists of records that
        # should already be ready to export.
        data: List[Record] = []
        for t in config.concat_and_destroy:
            data += response[t]
            del response[t]
            logging.info('Deleted temporary %s', t)
        assert table is not None
        response[table] = data
    elif config.use_all:
        assert model is not None
        query = model.objects.all()
        rows = list(query)
    elif config.normal_parent:
        # In this mode, our current model is figuratively Article,
        # and normal_parent is figuratively Blog, and
        # now we just need to get all the articles
        # contained by the blogs.
        model = config.model
        assert parent is not None
        assert parent.table is not None
        assert config.parent_key is not None
        parent_ids = [r['id'] for r in response[parent.table]]
        filter_parms: Dict[str, Any] = {config.parent_key: parent_ids}
        if config.filter_args is not None:
            filter_parms.update(config.filter_args)
        assert model is not None
        query = model.objects.filter(**filter_parms)
        rows = list(query)
    elif config.id_source:
        # In this mode, we are the figurative Blog, and we now
        # need to look at the current response to get all the
        # blog ids from the Article rows we fetched previously.
        model = config.model
        assert model is not None
        # This will be a tuple of the form ('zerver_article', 'blog').
        (child_table, field) = config.id_source
        child_rows = response[child_table]
        if config.source_filter:
            child_rows = [r for r in child_rows if config.source_filter(r)]
        lookup_ids = [r[field] for r in child_rows]
        filter_parms = dict(id__in=lookup_ids)
        if config.filter_args:
            filter_parms.update(config.filter_args)
        query = model.objects.filter(**filter_parms)
        rows = list(query)
    if rows is not None:
        assert table is not None  # Hint for mypy
        response[table] = make_raw(rows, exclude=config.exclude)
    # Post-process rows
    for t in exported_tables:
        if t in DATE_FIELDS:
            floatify_datetime_fields(response, t)
    if table in BITHANDLER_FIELDS:
        listify_bithandler_fields(response, table)
    # Now walk our children.  It's extremely important to respect
    # the order of children here.
    for child_config in config.children:
        export_from_config(
            response=response,
            config=child_config,
            context=context,
        )
def get_realm_config() -> Config:
    # This function generates the main Config object that defines how
    # to do a full-realm export of a single realm from a Zulip server.
    #
    # It returns the root (seeded) realm config; every other Config
    # registers itself as a child, and children are exported in the
    # order they are created here, so ordering below is significant.
    realm_config = Config(
        table='zerver_realm',
        is_seeded=True,
    )
    Config(
        table='zerver_defaultstream',
        model=DefaultStream,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    Config(
        table='zerver_customprofilefield',
        model=CustomProfileField,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    Config(
        table='zerver_realmemoji',
        model=RealmEmoji,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    Config(
        table='zerver_realmdomain',
        model=RealmDomain,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    Config(
        table='zerver_realmfilter',
        model=RealmFilter,
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    # Client rows are server-wide (not per-realm), hence use_all.
    Config(
        table='zerver_client',
        model=Client,
        virtual_parent=realm_config,
        use_all=True,
    )
    user_profile_config = Config(
        custom_tables=[
            'zerver_userprofile',
            'zerver_userprofile_mirrordummy',
        ],
        # set table for children who treat us as normal parent
        table='zerver_userprofile',
        virtual_parent=realm_config,
        custom_fetch=fetch_user_profile,
    )
    Config(
        table='zerver_alertword',
        model=AlertWord,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    user_groups_config = Config(
        table='zerver_usergroup',
        model=UserGroup,
        normal_parent=realm_config,
        parent_key='realm__in',
    )
    Config(
        table='zerver_usergroupmembership',
        model=UserGroupMembership,
        normal_parent=user_groups_config,
        parent_key='user_group__in',
    )
    Config(
        custom_tables=[
            'zerver_userprofile_crossrealm',
        ],
        virtual_parent=user_profile_config,
        custom_fetch=fetch_user_profile_cross_realm,
    )
    Config(
        table='zerver_userpresence',
        model=UserPresence,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_customprofilefieldvalue',
        model=CustomProfileFieldValue,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_useractivity',
        model=UserActivity,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_useractivityinterval',
        model=UserActivityInterval,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_realmauditlog',
        model=RealmAuditLog,
        normal_parent=user_profile_config,
        parent_key='modified_user__in',
    )
    Config(
        table='zerver_userhotspot',
        model=UserHotspot,
        normal_parent=user_profile_config,
        parent_key='user__in',
    )
    Config(
        table='zerver_mutedtopic',
        model=MutedTopic,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_service',
        model=Service,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )
    Config(
        table='zerver_botstoragedata',
        model=BotStorageData,
        normal_parent=user_profile_config,
        parent_key='bot_profile__in',
    )
    Config(
        table='zerver_botconfigdata',
        model=BotConfigData,
        normal_parent=user_profile_config,
        parent_key='bot_profile__in',
    )
    # Some of these tables are intermediate "tables" that we
    # create only for the export.  Think of them as similar to views.
    user_subscription_config = Config(
        table='_user_subscription',
        model=Subscription,
        normal_parent=user_profile_config,
        filter_args={'recipient__type': Recipient.PERSONAL},
        parent_key='user_profile__in',
    )
    Config(
        table='_user_recipient',
        model=Recipient,
        virtual_parent=user_subscription_config,
        id_source=('_user_subscription', 'recipient'),
    )
    # Stream-related tables:
    stream_config = Config(
        table='zerver_stream',
        model=Stream,
        exclude=['email_token'],
        normal_parent=realm_config,
        parent_key='realm_id__in',
    )
    stream_recipient_config = Config(
        table='_stream_recipient',
        model=Recipient,
        normal_parent=stream_config,
        parent_key='type_id__in',
        filter_args={'type': Recipient.STREAM},
    )
    Config(
        table='_stream_subscription',
        model=Subscription,
        normal_parent=stream_recipient_config,
        parent_key='recipient_id__in',
    )
    # Huddle-related tables:
    Config(
        custom_tables=[
            '_huddle_recipient',
            '_huddle_subscription',
            'zerver_huddle',
        ],
        normal_parent=user_profile_config,
        custom_fetch=fetch_huddle_objects,
    )
    # Now build permanent tables from our temp tables.
    Config(
        table='zerver_recipient',
        virtual_parent=realm_config,
        concat_and_destroy=[
            '_user_recipient',
            '_stream_recipient',
            '_huddle_recipient',
        ],
    )
    Config(
        table='zerver_subscription',
        virtual_parent=realm_config,
        concat_and_destroy=[
            '_user_subscription',
            '_stream_subscription',
            '_huddle_subscription',
        ],
    )
    return realm_config
def fetch_user_profile(response: TableData, config: Config, context: Context) -> None:
    """Fetch the realm's UserProfile rows, splitting real users from
    mirror-dummy users; secrets (password, api_key) are excluded."""
    realm = context['realm']
    exportable_user_ids = context['exportable_user_ids']
    raw_rows = make_raw(
        list(UserProfile.objects.filter(realm_id=realm.id)),
        exclude=['password', 'api_key'],
    )
    real_users: List[Record] = []
    mirror_dummies: List[Record] = []
    for user_row in raw_rows:
        if exportable_user_ids is not None:
            if user_row['id'] not in exportable_user_ids:
                # Convert non-exportable users to
                # inactive is_mirror_dummy users.
                user_row['is_mirror_dummy'] = True
                user_row['is_active'] = False
            else:
                assert not user_row['is_mirror_dummy']
        bucket = mirror_dummies if user_row['is_mirror_dummy'] else real_users
        bucket.append(user_row)
    response['zerver_userprofile'] = real_users
    response['zerver_userprofile_mirrordummy'] = mirror_dummies
def fetch_user_profile_cross_realm(response: TableData, config: Config, context: Context) -> None:
    """Export stub records for the cross-realm system bots (id, default
    email, and personal recipient id), unless we are exporting the
    system bot realm itself."""
    realm = context['realm']
    response['zerver_userprofile_crossrealm'] = []
    bot_name_to_default_email = {
        "NOTIFICATION_BOT": "notification-bot@zulip.com",
        "EMAIL_GATEWAY_BOT": "emailgateway@zulip.com",
        "WELCOME_BOT": "welcome-bot@zulip.com",
    }
    if realm.string_id == settings.SYSTEM_BOT_REALM:
        return
    for bot in settings.INTERNAL_BOTS:
        default_email = bot_name_to_default_email.get(bot["var_name"])
        if default_email is None:
            # Only the bots listed above are exported.
            continue
        bot_email = bot["email_template"] % (settings.INTERNAL_BOT_DOMAIN,)
        bot_user_id = get_system_bot(bot_email).id
        recipient_id = Recipient.objects.get(type_id=bot_user_id, type=Recipient.PERSONAL).id
        response['zerver_userprofile_crossrealm'].append(dict(
            email=default_email,
            id=bot_user_id,
            recipient_id=recipient_id,
        ))
def fetch_attachment_data(response: TableData, realm_id: int, message_ids: Set[int]) -> None:
    """Export the realm's Attachment rows, restricted to exported messages.

    We usually export most (but not quite all) messages for the realm,
    so each attachment's 'messages' list is intersected with
    *message_ids*.  An attachment can be connected to multiple messages;
    if none of them survived the filtering, the attachment row itself is
    dropped from the export.
    """
    attachment_rows = make_raw(list(Attachment.objects.filter(realm_id=realm_id)))
    response['zerver_attachment'] = attachment_rows
    floatify_datetime_fields(response, 'zerver_attachment')
    surviving_rows = []
    for row in attachment_rows:
        exported_message_ids = set(row['messages']) & message_ids
        row['messages'] = sorted(exported_message_ids)
        if row['messages']:
            surviving_rows.append(row)
    response['zerver_attachment'] = surviving_rows
def fetch_reaction_data(response: TableData, message_ids: Set[int]) -> None:
    """Export the Reaction rows attached to the exported messages."""
    reactions = Reaction.objects.filter(message_id__in=list(message_ids))
    response['zerver_reaction'] = make_raw(list(reactions))
def fetch_huddle_objects(response: TableData, config: Config, context: Context) -> None:
    """Populate the temporary _huddle_recipient/_huddle_subscription
    "tables" and zerver_huddle, keeping only huddles whose members all
    belong to the realm being exported."""
    realm = context['realm']
    assert config.parent is not None
    assert config.parent.table is not None
    user_profile_ids = {r['id'] for r in response[config.parent.table]}
    # First we get all huddles involving someone in the realm.
    realm_huddle_subs = Subscription.objects.select_related("recipient").filter(
        recipient__type=Recipient.HUDDLE, user_profile__in=user_profile_ids)
    realm_huddle_recipient_ids = {sub.recipient_id for sub in realm_huddle_subs}
    # Mark all Huddles whose recipient ID contains a cross-realm user.
    unsafe_huddle_recipient_ids = set()
    for sub in Subscription.objects.select_related().filter(recipient__in=realm_huddle_recipient_ids):
        if sub.user_profile.realm != realm:
            # In almost every case the other realm will be zulip.com
            unsafe_huddle_recipient_ids.add(sub.recipient_id)
    # Now filter down to just those huddles that are entirely within the realm.
    #
    # This is important for ensuring that the User objects needed
    # to import it on the other end exist (since we're only
    # exporting the users from this realm), at the cost of losing
    # some of these cross-realm messages.
    huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
    huddle_recipient_ids = {sub.recipient_id for sub in huddle_subs}
    huddle_ids = {sub.recipient.type_id for sub in huddle_subs}
    huddle_subscription_dicts = make_raw(huddle_subs)
    huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))
    response['_huddle_recipient'] = huddle_recipients
    response['_huddle_subscription'] = huddle_subscription_dicts
    response['zerver_huddle'] = make_raw(Huddle.objects.filter(id__in=huddle_ids))
def fetch_usermessages(realm: Realm,
                       message_ids: Set[int],
                       user_profile_ids: Set[int],
                       message_filename: Path,
                       consent_message_id: Optional[int]=None) -> List[Record]:
    """Return JSONable UserMessage records for one message shard.

    UserMessage export security rule: You can export UserMessages for
    the messages you exported for the users in your realm.  When
    consent_message_id is given, the user set is further restricted to
    consenting users.
    """
    user_message_query = UserMessage.objects.filter(user_profile__realm=realm,
                                                    message_id__in=message_ids)
    if consent_message_id is not None:
        user_profile_ids = user_profile_ids & get_consented_user_ids(consent_message_id)
    exported_records = []
    for user_message in user_message_query:
        if user_message.user_profile_id not in user_profile_ids:
            continue
        record = model_to_dict(user_message)
        # Flags are exported as an integer bitmask, not a BitHandler.
        record['flags_mask'] = user_message.flags.mask
        del record['flags']
        exported_records.append(record)
    logging.info("Fetched UserMessages for %s", message_filename)
    return exported_records
def export_usermessages_batch(input_path: Path, output_path: Path,
                              consent_message_id: Optional[int]=None) -> None:
    """As part of the system for doing parallel exports, this runs on one
    batch of Message objects and adds the corresponding UserMessage
    objects. (This is called by the export_usermessage_batch
    management command)."""
    with open(input_path, "rb") as input_file:
        output = orjson.loads(input_file.read())
    message_ids = {item['id'] for item in output['zerver_message']}
    # The .partial file carries these two bookkeeping keys; consume them
    # so they do not appear in the final shard.
    user_profile_ids = set(output.pop('zerver_userprofile_ids'))
    realm = Realm.objects.get(id=output.pop('realm_id'))
    output['zerver_usermessage'] = fetch_usermessages(
        realm, message_ids, user_profile_ids, output_path, consent_message_id)
    write_message_export(output_path, output)
    os.unlink(input_path)
def write_message_export(message_filename: Path, output: MessageOutput) -> None:
    """Write one message shard to disk and log where it went."""
    write_data_to_file(message_filename, output)
    logging.info("Dumped to %s", message_filename)
def export_partial_message_files(realm: Realm,
                                 response: TableData,
                                 chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE,
                                 output_dir: Optional[Path]=None,
                                 public_only: bool=False,
                                 consent_message_id: Optional[int]=None) -> Set[int]:
    """Write the realm's exportable messages to messages-*.json.partial
    shard files in output_dir, returning the set of exported message IDs.

    public_only restricts the export to public streams; otherwise, when
    consent_message_id is given, only messages visible to consenting
    users (plus public streams) are included.
    """
    if output_dir is None:
        output_dir = tempfile.mkdtemp(prefix="zulip-export")
    def get_ids(records: List[Record]) -> Set[int]:
        return {x['id'] for x in records}
    # Basic security rule: You can export everything either...
    #   - sent by someone in your exportable_user_ids
    #        OR
    #   - received by someone in your exportable_user_ids (which
    #     equates to a recipient object we are exporting)
    #
    # TODO: In theory, you should be able to export messages in
    # cross-realm PM threads; currently, this only exports cross-realm
    # messages received by your realm that were sent by Zulip system
    # bots (e.g. emailgateway, notification-bot).
    # Here, "we" and "us" refers to the inner circle of users who
    # were specified as being allowed to be exported.  "Them"
    # refers to other users.
    user_ids_for_us = get_ids(
        response['zerver_userprofile'],
    )
    ids_of_our_possible_senders = get_ids(
        response['zerver_userprofile'] +
        response['zerver_userprofile_mirrordummy'] +
        response['zerver_userprofile_crossrealm'])
    consented_user_ids: Set[int] = set()
    if consent_message_id is not None:
        consented_user_ids = get_consented_user_ids(consent_message_id)
    if public_only:
        recipient_streams = Stream.objects.filter(realm=realm, invite_only=False)
        recipient_ids = Recipient.objects.filter(
            type=Recipient.STREAM, type_id__in=recipient_streams).values_list("id", flat=True)
        recipient_ids_for_us = get_ids(response['zerver_recipient']) & set(recipient_ids)
    elif consent_message_id is not None:
        # Consent-based export: public streams plus everything the
        # consenting users are subscribed to.
        public_streams = Stream.objects.filter(realm=realm, invite_only=False)
        public_stream_recipient_ids = Recipient.objects.filter(
            type=Recipient.STREAM, type_id__in=public_streams).values_list("id", flat=True)
        consented_recipient_ids = Subscription.objects.filter(user_profile__id__in=consented_user_ids). \
            values_list("recipient_id", flat=True)
        recipient_ids = set(public_stream_recipient_ids) | set(consented_recipient_ids)
        recipient_ids_for_us = get_ids(response['zerver_recipient']) & recipient_ids
    else:
        recipient_ids_for_us = get_ids(response['zerver_recipient'])
        # For a full export, we have implicit consent for all users in the export.
        consented_user_ids = user_ids_for_us
    if public_only:
        messages_we_received = Message.objects.filter(
            sender__in=ids_of_our_possible_senders,
            recipient__in=recipient_ids_for_us,
        ).order_by('id')
        # For the public stream export, we only need the messages those streams received.
        message_queries = [
            messages_we_received,
        ]
    else:
        # We capture most messages here: Messages that were sent by
        # anyone in the export and received by any of the users who we
        # have consent to export.
        messages_we_received = Message.objects.filter(
            sender__in=ids_of_our_possible_senders,
            recipient__in=recipient_ids_for_us,
        ).order_by('id')
        # The above query is missing some messages that consenting
        # users have access to, namely, PMs sent by one of the users
        # in our export to another user (since the only subscriber to
        # a Recipient object for Recipient.PERSONAL is the recipient,
        # not the sender).  The `consented_user_ids` list has
        # precisely those users whose Recipient.PERSONAL recipient ID
        # was already present in recipient_ids_for_us above.
        ids_of_non_exported_possible_recipients = ids_of_our_possible_senders - consented_user_ids
        recipients_for_them = Recipient.objects.filter(
            type=Recipient.PERSONAL,
            type_id__in=ids_of_non_exported_possible_recipients).values("id")
        recipient_ids_for_them = get_ids(recipients_for_them)
        messages_we_sent_to_them = Message.objects.filter(
            sender__in=consented_user_ids,
            recipient__in=recipient_ids_for_them,
        ).order_by('id')
        message_queries = [
            messages_we_received,
            messages_we_sent_to_them,
        ]
    all_message_ids: Set[int] = set()
    dump_file_id = 1
    # Shard file numbering continues across queries; message IDs must
    # not overlap between the queries (asserted downstream).
    for message_query in message_queries:
        dump_file_id = write_message_partial_for_query(
            realm=realm,
            message_query=message_query,
            dump_file_id=dump_file_id,
            all_message_ids=all_message_ids,
            output_dir=output_dir,
            user_profile_ids=user_ids_for_us,
            chunk_size=chunk_size,
        )
    return all_message_ids
def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_id: int,
                                    all_message_ids: Set[int], output_dir: Path,
                                    user_profile_ids: Set[int],
                                    chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> int:
    """Dump `message_query` to numbered messages-NNNNNN.json.partial shard files.

    Mutates `all_message_ids` in place to record every exported message id
    (and to assert that successive queries are disjoint).  Returns the next
    unused dump_file_id so the caller can keep numbering shards across
    several queries.
    """
    min_id = -1
    while True:
        # Keyset pagination: the query is ordered by id, so id > min_id
        # plus a slice yields the next chunk_size rows.
        actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
        message_chunk = make_raw(actual_query)
        message_ids = {m['id'] for m in message_chunk}
        # The message queries the caller passes in must not overlap.
        assert len(message_ids.intersection(all_message_ids)) == 0
        all_message_ids.update(message_ids)

        if len(message_chunk) == 0:
            break

        # Figure out the name of our shard file.
        message_filename = os.path.join(output_dir, f"messages-{dump_file_id:06}.json")
        message_filename += '.partial'
        logging.info("Fetched Messages for %s", message_filename)

        # Clean up our messages.
        table_data: TableData = {}
        table_data['zerver_message'] = message_chunk
        floatify_datetime_fields(table_data, 'zerver_message')

        # Build up our output for the .partial file, which needs
        # a list of user_profile_ids to search for (as well as
        # the realm id).
        output: MessageOutput = {}
        output['zerver_message'] = table_data['zerver_message']
        output['zerver_userprofile_ids'] = list(user_profile_ids)
        output['realm_id'] = realm.id

        # And write the data.
        write_message_export(message_filename, output)
        min_id = max(message_ids)
        dump_file_id += 1

    return dump_file_id
def export_uploads_and_avatars(realm: Realm, output_dir: Path) -> None:
    """Export uploaded files, avatars, realm icons/logos and custom emoji.

    Dispatches to the local-disk exporters or the S3 exporters depending on
    whether this installation stores files locally.
    """
    subdirs = {
        name: os.path.join(output_dir, name)
        for name in ('uploads', 'avatars', 'realm_icons', 'emoji')
    }
    for dir_path in subdirs.values():
        os.makedirs(dir_path, exist_ok=True)

    if settings.LOCAL_UPLOADS_DIR:
        # Small installations and developers will usually just store files locally.
        export_uploads_from_local(realm,
                                  local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "files"),
                                  output_dir=subdirs['uploads'])
        export_avatars_from_local(realm,
                                  local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
                                  output_dir=subdirs['avatars'])
        # Custom emoji live under the local "avatars" directory as well.
        export_emoji_from_local(realm,
                                local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars"),
                                output_dir=subdirs['emoji'])
        export_realm_icons(realm,
                           local_dir=os.path.join(settings.LOCAL_UPLOADS_DIR),
                           output_dir=subdirs['realm_icons'])
    else:
        # Some bigger installations will have their data stored on S3.
        export_files_from_s3(realm,
                             settings.S3_AVATAR_BUCKET,
                             output_dir=subdirs['avatars'],
                             processing_avatars=True)
        export_files_from_s3(realm,
                             settings.S3_AUTH_UPLOADS_BUCKET,
                             output_dir=subdirs['uploads'])
        export_files_from_s3(realm,
                             settings.S3_AVATAR_BUCKET,
                             output_dir=subdirs['emoji'],
                             processing_emoji=True)
        export_files_from_s3(realm,
                             settings.S3_AVATAR_BUCKET,
                             output_dir=subdirs['realm_icons'],
                             processing_realm_icon_and_logo=True)
def _check_key_metadata(email_gateway_bot: Optional[UserProfile],
                        key: ServiceResource, processing_avatars: bool,
                        realm: Realm, user_ids: Set[int]) -> None:
    """Validate the S3 metadata on `key`; raise AssertionError on a mismatch.

    Helper function for export_files_from_s3.
    """
    # A realm_id that doesn't match the export realm is only legitimate for
    # the email gateway bot, which sends attachments cross-realm.
    if 'realm_id' in key.metadata and key.metadata['realm_id'] != str(realm.id):
        if email_gateway_bot is None or key.metadata['user_profile_id'] != str(email_gateway_bot.id):
            raise AssertionError(f"Key metadata problem: {key.name} {key.metadata} / {realm.id}")
        # Email gateway bot sends messages, potentially including attachments, cross-realm.
        print(f"File uploaded by email gateway bot: {key.key} / {key.metadata}")
        return

    if processing_avatars:
        # Avatar objects must be attributable to a user in this realm.
        if 'user_profile_id' not in key.metadata:
            raise AssertionError(f"Missing user_profile_id in key metadata: {key.metadata}")
        if int(key.metadata['user_profile_id']) not in user_ids:
            raise AssertionError(f"Wrong user_profile_id in key metadata: {key.metadata}")
        return

    if 'realm_id' not in key.metadata:
        raise AssertionError(f"Missing realm_id in key metadata: {key.metadata}")
def _get_exported_s3_record(
        bucket_name: str,
        key: ServiceResource,
        processing_emoji: bool) -> Dict[str, Union[str, int]]:
    """Build the records.json manifest entry for one S3 object.

    Helper function for export_files_from_s3.  Raises if the object has no
    realm_id in metadata and none can be derived from its uploader.
    """
    record = dict(s3_path=key.key, bucket=bucket_name,
                  size=key.content_length, last_modified=key.last_modified,
                  content_type=key.content_type, md5=key.e_tag)
    # Merge the S3 user metadata (user_profile_id, realm_id, ...) into the record.
    record.update(key.metadata)

    if processing_emoji:
        record['file_name'] = os.path.basename(key.key)

    if "user_profile_id" in record:
        user_profile = get_user_profile_by_id(record['user_profile_id'])
        record['user_profile_email'] = user_profile.email

        # Fix the record ids
        record['user_profile_id'] = int(record['user_profile_id'])

        # A few early avatars don't have 'realm_id' on the object; fix their metadata
        if 'realm_id' not in record:
            record['realm_id'] = user_profile.realm_id
    else:
        # There are some rare cases in which 'user_profile_id' may not be present
        # in S3 metadata. Eg: Exporting an organization which was created
        # initially from a local export won't have the "user_profile_id" metadata
        # set for realm_icons and realm_logos.
        pass

    if 'realm_id' in record:
        record['realm_id'] = int(record['realm_id'])
    else:
        raise Exception("Missing realm_id")

    return record
def _save_s3_object_to_file(key: ServiceResource, output_dir: str, processing_avatars: bool,
                            processing_emoji: bool, processing_realm_icon_and_logo: bool) -> None:
    """Download the S3 object `key` under output_dir, mirroring its key path.

    Helper function for export_files_from_s3.  Raises AssertionError for keys
    whose path is malformed or attempts directory traversal.
    """
    if processing_avatars or processing_emoji or processing_realm_icon_and_logo:
        filename = os.path.join(output_dir, key.key)
    else:
        # Plain upload keys are expected to look like
        # "<realm_id>/<random>/<filename>".
        fields = key.key.split('/')
        if len(fields) != 3:
            raise AssertionError(f"Suspicious key with invalid format {key.key}")
        filename = os.path.join(output_dir, key.key)

    if "../" in filename:
        # Bug fix: include the offending path in the message (it was
        # previously an f-string with no placeholder, losing the filename).
        raise AssertionError(f"Suspicious file with invalid format {filename}")

    # Use 'mark_sanitized' to cause Pysa to ignore the flow of user controlled
    # data into the filesystem sink, because we've already prevented directory
    # traversal with our assertion above.
    dirname = mark_sanitized(os.path.dirname(filename))
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    key.download_file(filename)
def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
                         processing_avatars: bool=False, processing_emoji: bool=False,
                         processing_realm_icon_and_logo: bool=False) -> None:
    """Download this realm's objects from the given S3 bucket into output_dir
    and write a records.json manifest.

    At most one processing_* flag should be set; it selects both the key
    prefix scanned and the metadata validation applied.  With no flag set,
    plain file uploads are exported.
    """
    bucket = get_bucket(bucket_name)
    records = []

    logging.info("Downloading uploaded files from %s", bucket_name)

    avatar_hash_values = set()
    user_ids = set()
    if processing_avatars:
        # Only the current avatar objects (hash and hash.original) for users
        # of this realm are exported; the bucket can contain other objects.
        for user_profile in UserProfile.objects.filter(realm=realm):
            avatar_path = user_avatar_path_from_ids(user_profile.id, realm.id)
            avatar_hash_values.add(avatar_path)
            avatar_hash_values.add(avatar_path + ".original")
            user_ids.add(user_profile.id)

    if processing_realm_icon_and_logo:
        object_prefix = f"{realm.id}/realm/"
    elif processing_emoji:
        object_prefix = f"{realm.id}/emoji/images/"
    else:
        object_prefix = f"{realm.id}/"

    if settings.EMAIL_GATEWAY_BOT is not None:
        email_gateway_bot: Optional[UserProfile] = get_system_bot(settings.EMAIL_GATEWAY_BOT)
    else:
        email_gateway_bot = None

    count = 0
    for bkey in bucket.objects.filter(Prefix=object_prefix):
        if processing_avatars and bkey.Object().key not in avatar_hash_values:
            continue
        key = bucket.Object(bkey.key)

        # This can happen if an email address has moved realms
        _check_key_metadata(email_gateway_bot, key, processing_avatars, realm, user_ids)
        record = _get_exported_s3_record(bucket_name, key, processing_emoji)

        record['path'] = key.key
        _save_s3_object_to_file(key, output_dir, processing_avatars, processing_emoji,
                                processing_realm_icon_and_logo)

        records.append(record)
        count += 1

        if (count % 100 == 0):
            logging.info("Finished %s", count)

    with open(os.path.join(output_dir, "records.json"), "wb") as records_file:
        records_file.write(orjson.dumps(records, option=orjson.OPT_INDENT_2))
def export_uploads_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
    """Copy this realm's file uploads from local storage into output_dir and
    write a records.json manifest describing them."""
    records = []
    attachments = Attachment.objects.filter(realm_id=realm.id)
    for count, attachment in enumerate(attachments, start=1):
        # Use 'mark_sanitized' to work around false positive caused by Pysa
        # thinking that 'realm' (and thus 'attachment' and 'attachment.path_id')
        # are user controlled
        path_id = mark_sanitized(attachment.path_id)

        local_path = os.path.join(local_dir, path_id)
        output_path = os.path.join(output_dir, path_id)

        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        shutil.copy2(local_path, output_path)
        stat = os.stat(local_path)
        records.append(dict(realm_id=attachment.realm_id,
                            user_profile_id=attachment.owner.id,
                            user_profile_email=attachment.owner.email,
                            s3_path=path_id,
                            path=path_id,
                            size=stat.st_size,
                            last_modified=stat.st_mtime,
                            content_type=None))

        if count % 100 == 0:
            logging.info("Finished %s", count)

    with open(os.path.join(output_dir, "records.json"), "wb") as records_file:
        records_file.write(orjson.dumps(records, option=orjson.OPT_INDENT_2))
def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
    """Copy avatar files for the realm's users (plus cross-realm system bots)
    from local storage into output_dir and write a records.json manifest."""
    count = 0
    records = []

    users = list(UserProfile.objects.filter(realm=realm))
    # System bots are cross-realm but may have avatars shown in this realm.
    users += [
        get_system_bot(settings.NOTIFICATION_BOT),
        get_system_bot(settings.EMAIL_GATEWAY_BOT),
        get_system_bot(settings.WELCOME_BOT),
    ]
    for user in users:
        # Gravatar-backed users have no locally stored avatar file.
        if user.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
            continue

        avatar_path = user_avatar_path_from_ids(user.id, realm.id)
        # Matches both the resized avatar and the '.original' file.
        wildcard = os.path.join(local_dir, avatar_path + '.*')

        for local_path in glob.glob(wildcard):
            logging.info(
                'Copying avatar file for user %s from %s',
                user.email, local_path,
            )
            fn = os.path.relpath(local_path, local_dir)
            output_path = os.path.join(output_dir, fn)
            os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)
            shutil.copy2(str(local_path), str(output_path))
            stat = os.stat(local_path)
            record = dict(realm_id=realm.id,
                          user_profile_id=user.id,
                          user_profile_email=user.email,
                          s3_path=fn,
                          path=fn,
                          size=stat.st_size,
                          last_modified=stat.st_mtime,
                          content_type=None)
            records.append(record)

            count += 1

            if (count % 100 == 0):
                logging.info("Finished %s", count)

    with open(os.path.join(output_dir, "records.json"), "wb") as records_file:
        records_file.write(orjson.dumps(records, option=orjson.OPT_INDENT_2))
def export_realm_icons(realm: Realm, local_dir: Path, output_dir: Path) -> None:
    """Copy the realm's icon/logo files into output_dir and write a
    records.json manifest for them."""
    records = []
    dir_relative_path = zerver.lib.upload.upload_backend.realm_avatar_and_logo_path(realm)
    icons_wildcard = os.path.join(local_dir, dir_relative_path, '*')
    for icon_absolute_path in glob.glob(icons_wildcard):
        icon_file_name = os.path.basename(icon_absolute_path)
        icon_relative_path = os.path.join(str(realm.id), icon_file_name)
        output_path = os.path.join(output_dir, icon_relative_path)
        os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)
        shutil.copy2(str(icon_absolute_path), str(output_path))
        records.append({
            'realm_id': realm.id,
            'path': icon_relative_path,
            's3_path': icon_relative_path,
        })
    with open(os.path.join(output_dir, "records.json"), "wb") as records_file:
        records_file.write(orjson.dumps(records, option=orjson.OPT_INDENT_2))
def export_emoji_from_local(realm: Realm, local_dir: Path, output_dir: Path) -> None:
    """Copy this realm's custom emoji files into output_dir and write a
    records.json manifest describing them."""
    records = []
    for count, realm_emoji in enumerate(RealmEmoji.objects.filter(realm_id=realm.id), start=1):
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=realm.id,
            emoji_file_name=realm_emoji.file_name,
        )

        # Use 'mark_sanitized' to work around false positive caused by Pysa
        # thinking that 'realm' (and thus 'attachment' and 'attachment.path_id')
        # are user controlled
        emoji_path = mark_sanitized(emoji_path)

        local_path = os.path.join(local_dir, emoji_path)
        output_path = os.path.join(output_dir, emoji_path)

        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        shutil.copy2(local_path, output_path)

        # The emoji's author is an optional foreign key.
        author_id = realm_emoji.author.id if realm_emoji.author else None
        records.append({
            'realm_id': realm.id,
            'author': author_id,
            'path': emoji_path,
            's3_path': emoji_path,
            'file_name': realm_emoji.file_name,
            'name': realm_emoji.name,
            'deactivated': realm_emoji.deactivated,
        })

        if count % 100 == 0:
            logging.info("Finished %s", count)

    with open(os.path.join(output_dir, "records.json"), "wb") as records_file:
        records_file.write(orjson.dumps(records, option=orjson.OPT_INDENT_2))
def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
    """Write stats.txt summarizing row counts of every exported .json file."""
    stats_file = os.path.join(output_dir, 'stats.txt')
    realm_file = os.path.join(output_dir, 'realm.json')
    attachment_file = os.path.join(output_dir, 'attachment.json')
    analytics_file = os.path.join(output_dir, 'analytics.json')
    message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
    fns = sorted([analytics_file, attachment_file, *message_files, realm_file])

    logging.info('Writing stats file: %s\n', stats_file)
    with open(stats_file, 'w') as f:
        # Table-style files map table name -> list of rows; report each count.
        for fn in fns:
            f.write(os.path.basename(fn) + '\n')
            with open(fn, "rb") as filename:
                data = orjson.loads(filename.read())
            for k in sorted(data):
                f.write(f'{len(data[k]):5} {k}\n')
            f.write('\n')

        avatar_file = os.path.join(output_dir, 'avatars/records.json')
        uploads_file = os.path.join(output_dir, 'uploads/records.json')

        # records.json files are flat lists, so just report their lengths.
        for fn in [avatar_file, uploads_file]:
            f.write(fn+'\n')
            with open(fn, "rb") as filename:
                data = orjson.loads(filename.read())
            f.write(f'{len(data):5} records\n')
            f.write('\n')
def do_export_realm(realm: Realm, output_dir: Path, threads: int,
                    exportable_user_ids: Optional[Set[int]]=None,
                    public_only: bool=False,
                    consent_message_id: Optional[int]=None) -> str:
    """Export a complete realm to output_dir and return the tarball's path.

    Orchestrates the whole pipeline, in order: realm tables, uploads and
    avatars, .partial message shards, reactions, analytics, attachments,
    parallel UserMessage export, the stats file, and finally the tarball.
    """
    response: TableData = {}

    # We need at least one thread running to export
    # UserMessage rows. The management command should
    # enforce this for us.
    if not settings.TEST_SUITE:
        assert threads >= 1

    realm_config = get_realm_config()

    create_soft_link(source=output_dir, in_progress=True)

    logging.info("Exporting data from get_realm_config()...")
    export_from_config(
        response=response,
        config=realm_config,
        seed_object=realm,
        context=dict(realm=realm, exportable_user_ids=exportable_user_ids),
    )
    logging.info('...DONE with get_realm_config() data')

    sanity_check_output(response)

    logging.info("Exporting uploaded files and avatars")
    export_uploads_and_avatars(realm, output_dir)

    # We (sort of) export zerver_message rows here. We write
    # them to .partial files that are subsequently fleshed out
    # by parallel processes to add in zerver_usermessage data.
    # This is for performance reasons, of course. Some installations
    # have millions of messages.
    logging.info("Exporting .partial files messages")
    message_ids = export_partial_message_files(realm, response, output_dir=output_dir,
                                               public_only=public_only,
                                               consent_message_id=consent_message_id)
    logging.info('%d messages were exported', len(message_ids))

    # zerver_reaction
    zerver_reaction: TableData = {}
    fetch_reaction_data(response=zerver_reaction, message_ids=message_ids)
    response.update(zerver_reaction)

    # Write realm data
    export_file = os.path.join(output_dir, "realm.json")
    write_data_to_file(output_file=export_file, data=response)
    logging.info('Writing realm data to %s', export_file)

    # Write analytics data
    export_analytics_tables(realm=realm, output_dir=output_dir)

    # zerver_attachment
    export_attachment_table(realm=realm, output_dir=output_dir, message_ids=message_ids)

    # Start parallel jobs to export the UserMessage objects.
    launch_user_message_subprocesses(threads=threads, output_dir=output_dir,
                                     consent_message_id=consent_message_id)

    logging.info("Finished exporting %s", realm.string_id)
    create_soft_link(source=output_dir, in_progress=False)

    do_write_stats_file_for_realm_export(output_dir)

    # We need to change back to the current working directory after writing
    # the tarball to the output directory, otherwise the state is compromised
    # for our unit tests.
    reset_dir = os.getcwd()
    tarball_path = output_dir.rstrip('/') + '.tar.gz'
    os.chdir(os.path.dirname(output_dir))
    subprocess.check_call(["tar", "-czf", tarball_path, os.path.basename(output_dir)])
    os.chdir(reset_dir)
    return tarball_path
def export_attachment_table(realm: Realm, output_dir: Path, message_ids: Set[int]) -> None:
    """Write attachment.json with the realm's Attachment rows referenced by
    the exported messages."""
    output_file = os.path.join(output_dir, "attachment.json")
    response: TableData = {}
    fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
    logging.info('Writing attachment table data to %s', output_file)
    write_data_to_file(output_file=output_file, data=response)
def create_soft_link(source: Path, in_progress: bool=True) -> None:
    """Maintain the export-in-progress / export-most-recent symlinks.

    While an export is running, export-in-progress points at its output
    directory; when it completes, that marker is removed and
    export-most-recent is updated instead.
    """
    if settings.DEVELOPMENT:
        in_progress_link = os.path.join(settings.DEPLOY_ROOT, 'var', 'export-in-progress')
        done_link = os.path.join(settings.DEPLOY_ROOT, 'var', 'export-most-recent')
    else:
        in_progress_link = '/home/zulip/export-in-progress'
        done_link = '/home/zulip/export-most-recent'

    if in_progress:
        overwrite_symlink(source, in_progress_link)
        return

    # Export finished: drop the in-progress marker (it may already be
    # gone) and point the most-recent link at the output.
    try:
        os.remove(in_progress_link)
    except FileNotFoundError:
        pass
    overwrite_symlink(source, done_link)
    logging.info('See %s for output files', done_link)
def launch_user_message_subprocesses(threads: int, output_dir: Path,
                                     consent_message_id: Optional[int]=None) -> None:
    """Spawn `threads` manage.py export_usermessage_batch workers and block
    until all of them have exited."""
    logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows', threads)
    pids = {}

    for shard_id in range(threads):
        arguments = [
            os.path.join(settings.DEPLOY_ROOT, "manage.py"),
            'export_usermessage_batch',
            f'--path={output_dir}',
            f'--thread={shard_id}',
        ]
        if consent_message_id is not None:
            arguments.append(f'--consent-message-id={consent_message_id}')

        process = subprocess.Popen(arguments)
        pids[process.pid] = shard_id

    while pids:
        # os.wait() reaps whichever child process exits next.
        pid, status = os.wait()
        shard = pids.pop(pid)
        print(f'Shard {shard} finished, status {status}')
def do_export_user(user_profile: UserProfile, output_dir: Path) -> None:
    """Export one user's data: user.json plus their message shard files."""
    response: TableData = {}
    export_single_user(user_profile, response)
    export_file = os.path.join(output_dir, "user.json")
    write_data_to_file(output_file=export_file, data=response)

    logging.info("Exporting messages")
    export_messages_single_user(user_profile, output_dir)
def export_single_user(user_profile: UserProfile, response: TableData) -> None:
    """Populate `response` with the tables a single user may export."""
    export_from_config(
        response=response,
        config=get_single_user_config(),
        seed_object=user_profile,
    )
def get_single_user_config() -> Config:
    """Define the limited configuration for what data to export when
    exporting all data that a single Zulip user has access to in an
    organization."""

    # zerver_userprofile
    user_profile_config = Config(
        table='zerver_userprofile',
        is_seeded=True,
        exclude=['password', 'api_key'],
    )

    # zerver_subscription
    subscription_config = Config(
        table='zerver_subscription',
        model=Subscription,
        normal_parent=user_profile_config,
        parent_key='user_profile__in',
    )

    # zerver_recipient
    recipient_config = Config(
        table='zerver_recipient',
        model=Recipient,
        virtual_parent=subscription_config,
        id_source=('zerver_subscription', 'recipient'),
    )

    # zerver_stream
    #
    # TODO: We currently export the existence of private streams, but
    # not their message history, in the "export with partial member
    # consent" code path.  This consistent with our documented policy,
    # since that data is available to the organization administrator
    # who initiated the export, but unnecessary and potentially
    # confusing; it'd be better to just skip those streams from the
    # export (which would require more complex export logic for the
    # subscription/recipient/stream tables to exclude private streams
    # with no consenting subscribers).
    Config(
        table='zerver_stream',
        model=Stream,
        virtual_parent=recipient_config,
        id_source=('zerver_recipient', 'type_id'),
        source_filter=lambda r: r['type'] == Recipient.STREAM,
        exclude=['email_token'],
    )

    return user_profile_config
def export_messages_single_user(user_profile: UserProfile, output_dir: Path,
                                chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:
    """Write the messages this user received to messages-NNNNNN.json shard
    files, chunk_size messages at a time."""
    user_message_query = UserMessage.objects.filter(user_profile=user_profile).order_by("id")
    min_id = -1
    dump_file_id = 1
    while True:
        # Keyset pagination over UserMessage ids.
        actual_query = user_message_query.select_related(
            "message", "message__sending_client").filter(id__gt=min_id)[0:chunk_size]
        user_message_chunk = list(actual_query)
        user_message_ids = {um.id for um in user_message_chunk}

        if len(user_message_chunk) == 0:
            break

        message_chunk = []
        for user_message in user_message_chunk:
            item = model_to_dict(user_message.message)
            item['flags'] = user_message.flags_list()
            item['flags_mask'] = user_message.flags.mask
            # Add a few nice, human-readable details
            item['sending_client_name'] = user_message.message.sending_client.name
            item['display_recipient'] = get_display_recipient(user_message.message.recipient)
            message_chunk.append(item)

        message_filename = os.path.join(output_dir, f"messages-{dump_file_id:06}.json")
        logging.info("Fetched Messages for %s", message_filename)

        output = {'zerver_message': message_chunk}
        floatify_datetime_fields(output, 'zerver_message')
        message_output: MessageOutput = dict(output)

        write_message_export(message_filename, message_output)
        min_id = max(user_message_ids)
        dump_file_id += 1
def export_analytics_tables(realm: Realm, output_dir: Path) -> None:
    """Write analytics.json with the realm's analytics count tables."""
    export_file = os.path.join(output_dir, "analytics.json")
    logging.info("Writing analytics table data to %s", (export_file))

    response: TableData = {}
    export_from_config(
        response=response,
        config=get_analytics_config(),
        seed_object=realm,
    )

    # The seeding logic results in a duplicate zerver_realm object
    # being included in the analytics data. We don't want it, as that
    # data is already in `realm.json`, so we just delete it here
    # before writing to disk.
    del response['zerver_realm']

    write_data_to_file(output_file=export_file, data=response)
def get_analytics_config() -> Config:
    """Define what data to export for the analytics.json file in a
    full-realm export."""
    analytics_config = Config(
        table='zerver_realm',
        is_seeded=True,
    )

    # Each analytics count table hangs off the seeded realm via realm_id.
    for table, model in (('analytics_realmcount', RealmCount),
                         ('analytics_usercount', UserCount),
                         ('analytics_streamcount', StreamCount)):
        Config(
            table=table,
            model=model,
            normal_parent=analytics_config,
            parent_key='realm_id__in',
        )

    return analytics_config
def get_consented_user_ids(consent_message_id: int) -> Set[int]:
    """Return the ids of users who granted export consent by reacting to the
    consent message with the 'outbox' unicode emoji."""
    reactions = Reaction.objects.filter(message__id=consent_message_id,
                                        reaction_type="unicode_emoji",
                                        # outbox = 1f4e4
                                        emoji_code="1f4e4")
    return set(reactions.values_list("user_profile", flat=True))
def export_realm_wrapper(realm: Realm, output_dir: str,
                         threads: int, upload: bool,
                         public_only: bool,
                         delete_after_upload: bool,
                         percent_callback: Optional[Callable[[Any], None]]=None,
                         consent_message_id: Optional[int]=None) -> Optional[str]:
    """Run do_export_realm and optionally upload the resulting tarball.

    Returns the public URL of the uploaded tarball, or None when upload
    was not requested.
    """
    tarball_path = do_export_realm(realm=realm, output_dir=output_dir,
                                   threads=threads, public_only=public_only,
                                   consent_message_id=consent_message_id)
    print(f"Finished exporting to {output_dir}")
    print(f"Tarball written to {tarball_path}")

    if not upload:
        return None

    # We upload to the `avatars` bucket because that's world-readable
    # without additional configuration. We'll likely want to change
    # that in the future.
    print("Uploading export tarball...")
    public_url = zerver.lib.upload.upload_backend.upload_export_tarball(
        realm, tarball_path, percent_callback=percent_callback)
    print()
    print(f"Uploaded to {public_url}")

    if delete_after_upload:
        os.remove(tarball_path)
        print(f"Successfully deleted the tarball at {tarball_path}")
    return public_url
def get_realm_exports_serialized(user: UserProfile) -> List[Dict[str, Any]]:
    """Return the realm's export audit-log entries as API-ready dicts,
    sorted by id."""
    all_exports = RealmAuditLog.objects.filter(realm=user.realm,
                                               event_type=RealmAuditLog.REALM_EXPORTED)
    exports_dict = {}
    for export in all_exports:
        pending = True
        export_url = None
        deleted_timestamp = None
        failed_timestamp = None

        # extra_data is only populated once the export finished (or failed).
        if export.extra_data is not None:
            pending = False

            export_data = orjson.loads(export.extra_data)
            deleted_timestamp = export_data.get('deleted_timestamp')
            failed_timestamp = export_data.get('failed_timestamp')
            export_path = export_data.get('export_path')

            if export_path and not deleted_timestamp:
                export_url = zerver.lib.upload.upload_backend.get_export_tarball_url(
                    user.realm, export_path)

        # NOTE(review): this assumes acting_user is always set on
        # REALM_EXPORTED audit-log rows — confirm it cannot be None here.
        exports_dict[export.id] = dict(
            id=export.id,
            export_time=export.event_time.timestamp(),
            acting_user_id=export.acting_user.id,
            export_url=export_url,
            deleted_timestamp=deleted_timestamp,
            failed_timestamp=failed_timestamp,
            pending=pending,
        )
    return sorted(exports_dict.values(), key=lambda export_dict: export_dict['id'])
| apache-2.0 |
jbenden/ansible | lib/ansible/modules/network/cloudengine/ce_dldp_interface.py | 42 | 22885 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: development status and support channel.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.0'}
# Module documentation (rendered by ansible-doc).  Typos fixed:
# "confuration" -> "configuration", "reseting" -> "resetting".
DOCUMENTATION = '''
---
module: ce_dldp_interface
version_added: "2.4"
short_description: Manages interface DLDP configuration on HUAWEI CloudEngine switches.
description:
    - Manages interface DLDP configuration on HUAWEI CloudEngine switches.
author:
    - Zhou Zhijin (@CloudEngine-Ansible)
notes:
    - If C(state=present, enable=disable), interface DLDP enable will be turned off and
      related interface DLDP configuration will be cleared.
    - If C(state=absent), only local_mac is supported to configure.
options:
    interface:
        description:
            - Must be fully qualified interface name, i.e. GE1/0/1, 10GE1/0/1, 40GE1/0/22, 100GE1/0/1.
        required: true
    enable:
        description:
            - Set interface DLDP enable state.
        required: false
        default: null
        choices: ['enable', 'disable']
    mode_enable:
        description:
            - Set DLDP compatible-mode enable state.
        required: false
        default: null
        choices: ['enable', 'disable']
    local_mac:
        description:
            - Set the source MAC address for DLDP packets sent in the DLDP-compatible mode.
              The value of MAC address is in H-H-H format. H contains 1 to 4 hexadecimal digits.
        required: false
        default: null
    reset:
        description:
            - Specify whether resetting interface DLDP state.
        required: false
        default: null
        choices: ['enable', 'disable']
    state:
        description:
            - Manage the state of the resource.
        required: false
        default: present
        choices: ['present','absent']
'''
# Usage examples (rendered by ansible-doc).  Typos fixed:
# "Configuire" -> "Configure", "addreess" -> "address".
EXAMPLES = '''
- name: DLDP interface test
  hosts: cloudengine
  connection: local
  gather_facts: no
  vars:
    cli:
      host: "{{ inventory_hostname }}"
      port: "{{ ansible_ssh_port }}"
      username: "{{ username }}"
      password: "{{ password }}"
      transport: cli

  tasks:

  - name: "Configure interface DLDP enable state and ensure global dldp enable is turned on"
    ce_dldp_interface:
      interface: 40GE2/0/1
      enable: enable
      provider: "{{ cli }}"

  - name: "Configure interface DLDP compatible-mode enable state and ensure interface DLDP state is already enabled"
    ce_dldp_interface:
      interface: 40GE2/0/1
      enable: enable
      mode_enable: enable
      provider: "{{ cli }}"

  - name: "Configure the source MAC address for DLDP packets sent in the DLDP-compatible mode and
           ensure interface DLDP state and compatible-mode enable state is already enabled"
    ce_dldp_interface:
      interface: 40GE2/0/1
      enable: enable
      mode_enable: enable
      local_mac: aa-aa-aa
      provider: "{{ cli }}"

  - name: "Reset DLDP state of specified interface and ensure interface DLDP state is already enabled"
    ce_dldp_interface:
      interface: 40GE2/0/1
      enable: enable
      reset: enable
      provider: "{{ cli }}"

  - name: "Unconfigure interface DLDP local mac address when C(state=absent)"
    ce_dldp_interface:
      interface: 40GE2/0/1
      state: absent
      local_mac: aa-aa-aa
      provider: "{{ cli }}"
'''
# Return-value documentation (rendered by ansible-doc).  Typos fixed:
# "enalbe" -> "enable", "configration" -> "configuration".
RETURN = '''
proposed:
    description: k/v pairs of parameters passed into module
    returned: always
    type: dict
    sample: {
                "enable": "enable",
                "interface": "40GE2/0/22",
                "local_mac": "aa-aa-aa",
                "mode_enable": "enable",
                "reset": "enable"
            }
existing:
    description: k/v pairs of existing interface DLDP configuration
    returned: always
    type: dict
    sample: {
                "enable": "disable",
                "interface": "40GE2/0/22",
                "local_mac": null,
                "mode_enable": null,
                "reset": "disable"
            }
end_state:
    description: k/v pairs of interface DLDP configuration after module execution
    returned: always
    type: dict
    sample: {
                "enable": "enable",
                "interface": "40GE2/0/22",
                "local_mac": "00aa-00aa-00aa",
                "mode_enable": "enable",
                "reset": "enable"
            }
updates:
    description: command sent to the device
    returned: always
    type: list
    sample: [
                "dldp enable",
                "dldp compatible-mode enable",
                "dldp compatible-mode local-mac aa-aa-aa",
                "dldp reset"
            ]
changed:
    description: check to see if a change was made on the device
    returned: always
    type: boolean
    sample: true
'''
import copy
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, set_nc_config, get_nc_config, execute_nc_action
# NETCONF XML payload templates for the Huawei VRP DLDP schema.  The %s
# placeholders are filled with the interface name and, for merge/create,
# the dldpEnable / dldpCompatibleEnable / dldpLocalMac values.

# Action: reset DLDP state on one interface.
CE_NC_ACTION_RESET_INTF_DLDP = """
<action>
<dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<resetIfDldp>
<ifName>%s</ifName>
</resetIfDldp>
</dldp>
</action>
"""

# Get: query one interface's DLDP configuration (subtree filter).
CE_NC_GET_INTF_DLDP_CONFIG = """
<filter type="subtree">
<dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<dldpInterfaces>
<dldpInterface>
<ifName>%s</ifName>
<dldpEnable></dldpEnable>
<dldpCompatibleEnable></dldpCompatibleEnable>
<dldpLocalMac></dldpLocalMac>
</dldpInterface>
</dldpInterfaces>
</dldp>
</filter>
"""

# Merge: modify an existing interface DLDP configuration.
CE_NC_MERGE_DLDP_INTF_CONFIG = """
<config>
<dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<dldpInterfaces>
<dldpInterface operation="merge">
<ifName>%s</ifName>
<dldpEnable>%s</dldpEnable>
<dldpCompatibleEnable>%s</dldpCompatibleEnable>
<dldpLocalMac>%s</dldpLocalMac>
</dldpInterface>
</dldpInterfaces>
</dldp>
</config>
"""

# Create: create a new interface DLDP configuration.
CE_NC_CREATE_DLDP_INTF_CONFIG = """
<config>
<dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<dldpInterfaces>
<dldpInterface operation="create">
<ifName>%s</ifName>
<dldpEnable>%s</dldpEnable>
<dldpCompatibleEnable>%s</dldpCompatibleEnable>
<dldpLocalMac>%s</dldpLocalMac>
</dldpInterface>
</dldpInterfaces>
</dldp>
</config>
"""

# Delete: remove an interface's DLDP configuration.
CE_NC_DELETE_DLDP_INTF_CONFIG = """
<config>
<dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<dldpInterfaces>
<dldpInterface operation="delete">
<ifName>%s</ifName>
</dldpInterface>
</dldpInterfaces>
</dldp>
</config>
"""
def judge_is_mac_same(mac1, mac2):
    """Return True when the two MAC strings denote the same address.

    Hex groups are compared case-insensitively and with leading zeros
    ignored, so 'aa-aa-aa' equals '00AA-00aa-00aa'.
    """
    if mac1 == mac2:
        return True

    groups1 = re.findall(r'([0-9A-Fa-f]+)', mac1)
    groups2 = re.findall(r'([0-9A-Fa-f]+)', mac2)
    if len(groups1) != len(groups2):
        return False

    # Normalize each hex group (strip leading zeros, lowercase) and
    # compare position by position.
    return all(a.lstrip('0').lower() == b.lstrip('0').lower()
               for a, b in zip(groups1, groups2))
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lowercase type string, or None for an unrecognized name
    (or a None argument).
    """
    if interface is None:
        return None

    # Ordered prefix table; first match wins, mirroring the original
    # elif chain.
    prefix_to_type = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )

    upper_name = interface.upper()
    for prefix, iftype in prefix_to_type:
        if upper_name.startswith(prefix):
            return iftype
    return None
class DldpInterface(object):
    """Manage per-interface DLDP configuration on CloudEngine devices via NETCONF."""

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # DLDP interface configuration parameters taken from the playbook.
        # `or None` collapses empty strings to None so truth tests below work.
        self.interface = self.module.params['interface']
        self.enable = self.module.params['enable'] or None
        self.reset = self.module.params['reset'] or None
        self.mode_enable = self.module.params['mode_enable'] or None
        self.local_mac = self.module.params['local_mac'] or None
        self.state = self.module.params['state']
        # Current device config, filled by get_dldp_intf_exist_config().
        self.dldp_intf_conf = dict()
        # True when the desired config already matches the device.
        self.same_conf = False

        # Result state reported back to Ansible.
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = list()
        self.end_state = list()

    def check_config_if_same(self):
        """Return True if the device's current DLDP config already matches the request."""
        if self.state == 'absent':
            return False
        else:
            if self.enable and self.enable != self.dldp_intf_conf['dldpEnable']:
                return False

            if self.mode_enable and self.mode_enable != self.dldp_intf_conf['dldpCompatibleEnable']:
                return False

            if self.local_mac:
                flag = judge_is_mac_same(
                    self.local_mac, self.dldp_intf_conf['dldpLocalMac'])
                if not flag:
                    return False

            # A reset request always requires an action, so it is never "same".
            if self.reset and self.reset == 'enable':
                return False
        return True

    def check_macaddr(self):
        """Check that self.local_mac is three hyphen-separated hex groups."""
        valid_char = '0123456789abcdef-'
        mac = self.local_mac

        if len(mac) > 16:
            return False

        mac_list = re.findall(r'([0-9a-fA-F]+)', mac)
        if len(mac_list) != 3:
            return False

        if mac.count('-') != 2:
            return False

        # Every character must be a hex digit or the '-' separator.
        for _, value in enumerate(mac, start=0):
            if value.lower() not in valid_char:
                return False

        return True

    def check_params(self):
        """Validate all input params; fail the module on the first violation."""
        if not self.interface:
            self.module.fail_json(msg='Error: Interface name cannot be empty.')

        if self.interface:
            intf_type = get_interface_type(self.interface)
            if not intf_type:
                self.module.fail_json(
                    msg='Error: Interface name of %s '
                        'is error.' % self.interface)

        # state=absent only makes sense for undoing the local_mac setting.
        if (self.state == 'absent') and (self.reset or self.mode_enable or self.enable):
            self.module.fail_json(msg="Error: It's better to use state=present when "
                                      "configuring or unconfiguring enable, mode_enable "
                                      "or using reset flag. state=absent is just for "
                                      "when using local_mac param.")

        if self.state == 'absent' and not self.local_mac:
            self.module.fail_json(
                msg="Error: Please specify local_mac parameter.")

        if self.state == 'present':
            # Other parameters are meaningless while DLDP stays disabled on the port.
            if (self.dldp_intf_conf['dldpEnable'] == 'disable' and not self.enable and
                    (self.mode_enable or self.local_mac or self.reset)):
                self.module.fail_json(msg="Error: when DLDP is already disabled on this port, "
                                          "mode_enable, local_mac and reset parameters are not "
                                          "expected to configure.")

            if self.enable == 'disable' and (self.mode_enable or self.local_mac or self.reset):
                self.module.fail_json(msg="Error: when using enable=disable, "
                                          "mode_enable, local_mac and reset parameters "
                                          "are not expected to configure.")

            # local_mac requires compatible-mode to be (or become) enabled.
            if self.local_mac and (self.mode_enable == 'disable' or
                                   (self.dldp_intf_conf['dldpCompatibleEnable'] == 'disable' and self.mode_enable != 'enable')):
                self.module.fail_json(msg="Error: when DLDP compatible-mode is disabled on this port, "
                                          "Configuring local_mac is not allowed.")

        if self.local_mac:
            if not self.check_macaddr():
                self.module.fail_json(
                    msg="Error: local_mac has invalid value %s." % self.local_mac)

    def init_module(self):
        """Instantiate the AnsibleModule used for parameter handling and exit."""
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def check_response(self, xml_str, xml_name):
        """Fail the module unless the NETCONF reply contains <ok/>."""
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_dldp_intf_exist_config(self):
        """Query the device and return its current per-interface DLDP config as a dict."""
        dldp_conf = dict()
        xml_str = CE_NC_GET_INTF_DLDP_CONFIG % self.interface
        con_obj = get_nc_config(self.module, xml_str)
        if "<data/>" in con_obj:
            # Empty reply: no DLDP data for this interface; report it as disabled/unset.
            dldp_conf['dldpEnable'] = 'disable'
            dldp_conf['dldpCompatibleEnable'] = ""
            dldp_conf['dldpLocalMac'] = ""
            return dldp_conf

        # Strip namespaces so the ElementTree path below stays simple.
        xml_str = con_obj.replace('\r', '').replace('\n', '').\
            replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
            replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

        # get global DLDP info
        root = ElementTree.fromstring(xml_str)
        topo = root.find("data/dldp/dldpInterfaces/dldpInterface")
        if topo is None:
            self.module.fail_json(
                msg="Error: Get current DLDP configration failed.")
        for eles in topo:
            if eles.tag in ["dldpEnable", "dldpCompatibleEnable", "dldpLocalMac"]:
                if not eles.text:
                    dldp_conf[eles.tag] = ""
                else:
                    if eles.tag == "dldpEnable" or eles.tag == "dldpCompatibleEnable":
                        # Map the device's true/false to the module's enable/disable terms.
                        if eles.text == 'true':
                            value = 'enable'
                        else:
                            value = 'disable'
                    else:
                        value = eles.text
                    dldp_conf[eles.tag] = value

        return dldp_conf

    def config_intf_dldp(self):
        """Push the requested DLDP configuration to the interface (no-op when same_conf)."""
        if self.same_conf:
            return

        if self.state == "present":
            # Fall back to the device's current values for any parameter the
            # user did not supply, then map enable/disable -> true/false.
            enable = self.enable
            if not self.enable:
                enable = self.dldp_intf_conf['dldpEnable']
            if enable == 'enable':
                enable = 'true'
            else:
                enable = 'false'

            mode_enable = self.mode_enable
            if not self.mode_enable:
                mode_enable = self.dldp_intf_conf['dldpCompatibleEnable']
            if mode_enable == 'enable':
                mode_enable = 'true'
            else:
                mode_enable = 'false'

            local_mac = self.local_mac
            if not self.local_mac:
                local_mac = self.dldp_intf_conf['dldpLocalMac']

            # Choose delete / create / merge depending on the transition.
            if self.enable == 'disable' and self.enable != self.dldp_intf_conf['dldpEnable']:
                xml_str = CE_NC_DELETE_DLDP_INTF_CONFIG % self.interface
                ret_xml = set_nc_config(self.module, xml_str)
                self.check_response(ret_xml, "DELETE_DLDP_INTF_CONFIG")
            elif self.dldp_intf_conf['dldpEnable'] == 'disable' and self.enable == 'enable':
                xml_str = CE_NC_CREATE_DLDP_INTF_CONFIG % (
                    self.interface, 'true', mode_enable, local_mac)
                ret_xml = set_nc_config(self.module, xml_str)
                self.check_response(ret_xml, "CREATE_DLDP_INTF_CONFIG")
            elif self.dldp_intf_conf['dldpEnable'] == 'enable':
                if mode_enable == 'false':
                    # local_mac only applies in compatible mode; clear it otherwise.
                    local_mac = ''
                xml_str = CE_NC_MERGE_DLDP_INTF_CONFIG % (
                    self.interface, enable, mode_enable, local_mac)
                ret_xml = set_nc_config(self.module, xml_str)
                self.check_response(ret_xml, "MERGE_DLDP_INTF_CONFIG")

            if self.reset == 'enable':
                # Reset is an RPC action, not a config edit.
                xml_str = CE_NC_ACTION_RESET_INTF_DLDP % self.interface
                ret_xml = execute_nc_action(self.module, xml_str)
                self.check_response(ret_xml, "ACTION_RESET_INTF_DLDP")
            self.changed = True
        else:
            # state=absent: undo local_mac only when it matches the device value.
            if self.local_mac and judge_is_mac_same(self.local_mac, self.dldp_intf_conf['dldpLocalMac']):
                if self.dldp_intf_conf['dldpEnable'] == 'enable':
                    dldp_enable = 'true'
                else:
                    dldp_enable = 'false'
                if self.dldp_intf_conf['dldpCompatibleEnable'] == 'enable':
                    dldp_compat_enable = 'true'
                else:
                    dldp_compat_enable = 'false'
                # Merge with an empty MAC to clear it, keeping the other flags as-is.
                xml_str = CE_NC_MERGE_DLDP_INTF_CONFIG % (self.interface, dldp_enable, dldp_compat_enable, '')
                ret_xml = set_nc_config(self.module, xml_str)
                self.check_response(ret_xml, "UNDO_DLDP_INTF_LOCAL_MAC_CONFIG")
                self.changed = True

    def get_existing(self):
        """Snapshot the device's pre-change DLDP config into self.existing."""
        dldp_conf = dict()
        dldp_conf['interface'] = self.interface
        dldp_conf['enable'] = self.dldp_intf_conf.get('dldpEnable', None)
        dldp_conf['mode_enable'] = self.dldp_intf_conf.get(
            'dldpCompatibleEnable', None)
        dldp_conf['local_mac'] = self.dldp_intf_conf.get('dldpLocalMac', None)
        dldp_conf['reset'] = 'disable'
        self.existing = copy.deepcopy(dldp_conf)

    def get_proposed(self):
        """Record the requested (proposed) parameters for the module result."""
        self.proposed = dict(interface=self.interface, enable=self.enable,
                             mode_enable=self.mode_enable, local_mac=self.local_mac,
                             reset=self.reset, state=self.state)

    def get_update_cmd(self):
        """Assemble the CLI commands equivalent to the changes that were applied."""
        if self.same_conf:
            return

        if self.state == "present":
            if self.enable and self.enable != self.dldp_intf_conf['dldpEnable']:
                if self.enable == 'enable':
                    self.updates_cmd.append("dldp enable")
                elif self.enable == 'disable':
                    self.updates_cmd.append("undo dldp enable")

            if self.mode_enable and self.mode_enable != self.dldp_intf_conf['dldpCompatibleEnable']:
                if self.mode_enable == 'enable':
                    self.updates_cmd.append("dldp compatible-mode enable")
                else:
                    self.updates_cmd.append("undo dldp compatible-mode enable")

            if self.local_mac:
                flag = judge_is_mac_same(
                    self.local_mac, self.dldp_intf_conf['dldpLocalMac'])
                if not flag:
                    self.updates_cmd.append(
                        "dldp compatible-mode local-mac %s" % self.local_mac)

            if self.reset and self.reset == 'enable':
                self.updates_cmd.append('dldp reset')
        else:
            # state=absent only ever undoes the local-mac setting.
            if self.changed:
                self.updates_cmd.append("undo dldp compatible-mode local-mac")

    def get_end_state(self):
        """Re-query the device and snapshot the post-change config into self.end_state."""
        dldp_conf = dict()
        self.dldp_intf_conf = self.get_dldp_intf_exist_config()
        dldp_conf['interface'] = self.interface
        dldp_conf['enable'] = self.dldp_intf_conf.get('dldpEnable', None)
        dldp_conf['mode_enable'] = self.dldp_intf_conf.get(
            'dldpCompatibleEnable', None)
        dldp_conf['local_mac'] = self.dldp_intf_conf.get('dldpLocalMac', None)
        dldp_conf['reset'] = 'disable'
        if self.reset == 'enable':
            dldp_conf['reset'] = 'enable'
        self.end_state = copy.deepcopy(dldp_conf)

    def show_result(self):
        """Populate self.results and exit the module with them."""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)

    def work(self):
        """Execute the task: gather current config, validate, configure, report."""
        self.dldp_intf_conf = self.get_dldp_intf_exist_config()
        self.check_params()
        self.same_conf = self.check_config_if_same()
        self.get_existing()
        self.get_proposed()
        self.config_intf_dldp()
        self.get_update_cmd()
        self.get_end_state()
        self.show_result()
def main():
    """Module entry point: build the argument spec and run the DLDP task."""
    spec = dict(
        interface=dict(required=True, type='str'),
        enable=dict(choices=['enable', 'disable'], type='str'),
        reset=dict(choices=['enable', 'disable'], type='str'),
        mode_enable=dict(choices=['enable', 'disable'], type='str'),
        local_mac=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    # Merge in the connection arguments shared by all CloudEngine modules.
    spec.update(ce_argument_spec)
    DldpInterface(spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
supriyantomaftuh/syzygy | third_party/numpy/files/numpy/core/tests/test_regression.py | 11 | 55215 | from StringIO import StringIO
import pickle
import sys
import platform
import gc
import copy
from os import path
from numpy.testing import *
from numpy.testing.utils import _assert_valid_refcount, WarningManager
from numpy.compat import asbytes, asunicode, asbytes_nested
import warnings
import tempfile
import numpy as np
# On Python 3 the pickle/dump round-trips below need a bytes buffer, so
# rebind StringIO to io.BytesIO.
if sys.version_info[0] >= 3:
    import io
    StringIO = io.BytesIO

# Default value for the legacy `level=` keyword each test method accepts.
rlevel = 1
class TestRegression(TestCase):
    # Historical regression tests; each docstring cites the originating ticket.
    def test_invalid_round(self,level=rlevel):
        """Ticket #3"""
        v = 4.7599999999999998
        assert_array_equal(np.array([v]),np.array(v))

    def test_mem_empty(self,level=rlevel):
        """Ticket #7"""
        np.empty((1,),dtype=[('x',np.int64)])

    def test_pickle_transposed(self,level=rlevel):
        """Ticket #16"""
        a = np.transpose(np.array([[2,9],[7,0],[3,8]]))
        f = StringIO()
        pickle.dump(a,f)
        f.seek(0)
        b = pickle.load(f)
        f.close()
        assert_array_equal(a,b)

    def test_typeNA(self,level=rlevel):
        """Ticket #31"""
        assert_equal(np.typeNA[np.int64],'Int64')
        assert_equal(np.typeNA[np.uint64],'UInt64')

    def test_dtype_names(self,level=rlevel):
        """Ticket #35"""
        dt = np.dtype([(('name','label'),np.int32,3)])

    def test_reduce(self,level=rlevel):
        """Ticket #40"""
        assert_almost_equal(np.add.reduce([1.,.5],dtype=None), 1.5)

    def test_zeros_order(self,level=rlevel):
        """Ticket #43"""
        np.zeros([3], int, 'C')
        np.zeros([3], order='C')
        np.zeros([3], int, order='C')

    def test_sort_bigendian(self,level=rlevel):
        """Ticket #47"""
        a = np.linspace(0, 10, 11)
        c = a.astype(np.dtype('<f8'))
        c.sort()
        assert_array_almost_equal(c, a)

    def test_negative_nd_indexing(self,level=rlevel):
        """Ticket #49"""
        c = np.arange(125).reshape((5,5,5))
        origidx = np.array([-1, 0, 1])
        idx = np.array(origidx)
        c[idx]
        # Fancy indexing must not modify the index array in place.
        assert_array_equal(idx, origidx)

    def test_char_dump(self,level=rlevel):
        """Ticket #50"""
        f = StringIO()
        ca = np.char.array(np.arange(1000,1010),itemsize=4)
        ca.dump(f)
        f.seek(0)
        ca = np.load(f)
        f.close()

    def test_noncontiguous_fill(self,level=rlevel):
        """Ticket #58."""
        a = np.zeros((5,3))
        b = a[:,:2,]
        def rs():
            # Reshaping a non-contiguous view must raise, not corrupt data.
            b.shape = (10,)
        self.assertRaises(AttributeError,rs)

    def test_bool(self,level=rlevel):
        """Ticket #60"""
        x = np.bool_(1)

    def test_indexing1(self,level=rlevel):
        """Ticket #64"""
        descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
        buffer = ((([6j,4j],),),)
        h = np.array(buffer, dtype=descr)
        h['x']['y']['z']

    def test_indexing2(self,level=rlevel):
        """Ticket #65"""
        descr = [('x', 'i4', (2,))]
        buffer = ([3,2],)
        h = np.array(buffer, dtype=descr)
        h['x']

    def test_round(self,level=rlevel):
        """Ticket #67"""
        x = np.array([1+2j])
        assert_almost_equal(x**(-1), [1/(1+2j)])

    def test_scalar_compare(self,level=rlevel):
        """Ticket #72"""
        a = np.array(['test', 'auto'])
        assert_array_equal(a == 'auto', np.array([False,True]))
        self.assertTrue(a[1] == 'auto')
        self.assertTrue(a[0] != 'auto')
        b = np.linspace(0, 10, 11)
        self.assertTrue(b != 'auto')
        self.assertTrue(b[0] != 'auto')

    def test_unicode_swapping(self,level=rlevel):
        """Ticket #79"""
        ulen = 1
        ucs_value = u'\U0010FFFF'
        ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
        ua2 = ua.newbyteorder()
    def test_object_array_fill(self,level=rlevel):
        """Ticket #86"""
        x = np.zeros(1, 'O')
        x.fill([])

    def test_mem_dtype_align(self,level=rlevel):
        """Ticket #93"""
        self.assertRaises(TypeError,np.dtype,
                          {'names':['a'],'formats':['foo']},align=1)

    @dec.knownfailureif((sys.version_info[0] >= 3) or
                        (sys.platform == "win32" and platform.architecture()[0] == "64bit"),
                        "numpy.intp('0xff', 16) not supported on Py3, "
                        "as it does not inherit from Python int")
    def test_intp(self,level=rlevel):
        """Ticket #99"""
        # Widest hex literal that still fits the native intp width.
        i_width = np.int_(0).nbytes*2 - 1
        np.intp('0x' + 'f'*i_width,16)
        self.assertRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
        self.assertRaises(ValueError,np.intp,'0x1',32)
        assert_equal(255,np.intp('0xFF',16))
        assert_equal(1024,np.intp(1024))

    def test_endian_bool_indexing(self,level=rlevel):
        """Ticket #105"""
        a = np.arange(10.,dtype='>f8')
        b = np.arange(10.,dtype='<f8')
        xa = np.where((a>2) & (a<6))
        xb = np.where((b>2) & (b<6))
        ya = ((a>2) & (a<6))
        yb = ((b>2) & (b<6))
        assert_array_almost_equal(xa,ya.nonzero())
        assert_array_almost_equal(xb,yb.nonzero())
        assert_(np.all(a[ya] > 0.5))
        assert_(np.all(b[yb] > 0.5))

    def test_mem_dot(self,level=rlevel):
        """Ticket #106"""
        x = np.random.randn(0,1)
        y = np.random.randn(10,1)
        z = np.dot(x, np.transpose(y))

    def test_arange_endian(self,level=rlevel):
        """Ticket #111"""
        ref = np.arange(10)
        x = np.arange(10,dtype='<f8')
        assert_array_equal(ref,x)
        x = np.arange(10,dtype='>f8')
        assert_array_equal(ref,x)

    # Longfloat support is not consistent enough across
    # platforms for this test to be meaningful.
    # def test_longfloat_repr(self,level=rlevel):
    # """Ticket #112"""
    # if np.longfloat(0).itemsize > 8:
    # a = np.exp(np.array([1000],dtype=np.longfloat))
    # assert_(str(a)[1:9] == str(a[0])[:8])

    def test_argmax(self,level=rlevel):
        """Ticket #119"""
        a = np.random.normal(0,1,(4,5,6,7,8))
        for i in xrange(a.ndim):
            aargmax = a.argmax(i)

    def test_mem_divmod(self,level=rlevel):
        """Ticket #126"""
        for i in range(10):
            divmod(np.array([i])[0],10)

    def test_hstack_invalid_dims(self,level=rlevel):
        """Ticket #128"""
        x = np.arange(9).reshape((3,3))
        y = np.array([0,0,0])
        self.assertRaises(ValueError,np.hstack,(x,y))

    def test_squeeze_type(self,level=rlevel):
        """Ticket #133"""
        a = np.array([3])
        b = np.array(3)
        assert_(type(a.squeeze()) is np.ndarray)
        assert_(type(b.squeeze()) is np.ndarray)

    def test_add_identity(self,level=rlevel):
        """Ticket #143"""
        assert_equal(0,np.add.identity)

    def test_binary_repr_0(self,level=rlevel):
        """Ticket #151"""
        assert_equal('0',np.binary_repr(0))

    def test_rec_iterate(self,level=rlevel):
        """Ticket #160"""
        descr = np.dtype([('i',int),('f',float),('s','|S3')])
        x = np.rec.array([(1,1.1,'1.0'),
                          (2,2.2,'2.0')],dtype=descr)
        x[0].tolist()
        [i for i in x[0]]

    def test_unicode_string_comparison(self,level=rlevel):
        """Ticket #190"""
        a = np.array('hello',np.unicode_)
        b = np.array('world')
        a == b

    def test_tostring_FORTRANORDER_discontiguous(self,level=rlevel):
        """Fix in r2836"""
        # Create discontiguous Fortran-ordered array
        x = np.array(np.random.rand(3,3),order='F')[:,:2]
        assert_array_almost_equal(x.ravel(),np.fromstring(x.tostring()))

    def test_flat_assignment(self,level=rlevel):
        """Correct behaviour of ticket #194"""
        x = np.empty((3,1))
        x.flat = np.arange(3)
        assert_array_almost_equal(x,[[0],[1],[2]])
        x.flat = np.arange(3,dtype=float)
        assert_array_almost_equal(x,[[0],[1],[2]])

    def test_broadcast_flat_assignment(self,level=rlevel):
        """Ticket #194"""
        x = np.empty((3,1))
        def bfa(): x[:] = np.arange(3)
        def bfb(): x[:] = np.arange(3,dtype=float)
        self.assertRaises(ValueError, bfa)
        self.assertRaises(ValueError, bfb)

    def test_unpickle_dtype_with_object(self,level=rlevel):
        """Implemented in r2840"""
        dt = np.dtype([('x',int),('y',np.object_),('z','O')])
        f = StringIO()
        pickle.dump(dt,f)
        f.seek(0)
        dt_ = pickle.load(f)
        f.close()
        assert_equal(dt,dt_)

    def test_mem_array_creation_invalid_specification(self,level=rlevel):
        """Ticket #196"""
        dt = np.dtype([('x',int),('y',np.object_)])
        # Wrong way
        self.assertRaises(ValueError, np.array, [1,'object'], dt)
        # Correct way
        np.array([(1,'object')],dt)

    def test_recarray_single_element(self,level=rlevel):
        """Ticket #202"""
        a = np.array([1,2,3],dtype=np.int32)
        b = a.copy()
        r = np.rec.array(a,shape=1,formats=['3i4'],names=['d'])
        assert_array_equal(a,b)
        assert_equal(a,r[0][0])

    def test_zero_sized_array_indexing(self,level=rlevel):
        """Ticket #205"""
        tmp = np.array([])
        def index_tmp(): tmp[np.array(10)]
        self.assertRaises(IndexError, index_tmp)

    def test_chararray_rstrip(self,level=rlevel):
        """Ticket #222"""
        x = np.chararray((1,),5)
        x[0] = asbytes('a ')
        x = x.rstrip()
        assert_equal(x[0], asbytes('a'))

    def test_object_array_shape(self,level=rlevel):
        """Ticket #239"""
        assert_equal(np.array([[1,2],3,4],dtype=object).shape, (3,))
        assert_equal(np.array([[1,2],[3,4]],dtype=object).shape, (2,2))
        assert_equal(np.array([(1,2),(3,4)],dtype=object).shape, (2,2))
        assert_equal(np.array([],dtype=object).shape, (0,))
        assert_equal(np.array([[],[],[]],dtype=object).shape, (3,0))
        assert_equal(np.array([[3,4],[5,6],None],dtype=object).shape, (3,))

    def test_mem_around(self,level=rlevel):
        """Ticket #243"""
        x = np.zeros((1,))
        y = [0]
        decimal = 6
        np.around(abs(x-y),decimal) <= 10.0**(-decimal)

    def test_character_array_strip(self,level=rlevel):
        """Ticket #246"""
        x = np.char.array(("x","x ","x "))
        for c in x: assert_equal(c,"x")

    def test_lexsort(self,level=rlevel):
        """Lexsort memory error"""
        v = np.array([1,2,3,4,5,6,7,8,9,10])
        assert_equal(np.lexsort(v),0)
    def test_pickle_dtype(self,level=rlevel):
        """Ticket #251"""
        import pickle
        pickle.dumps(np.float)

    def test_swap_real(self, level=rlevel):
        """Ticket #265"""
        assert_equal(np.arange(4,dtype='>c8').imag.max(),0.0)
        assert_equal(np.arange(4,dtype='<c8').imag.max(),0.0)
        assert_equal(np.arange(4,dtype='>c8').real.max(),3.0)
        assert_equal(np.arange(4,dtype='<c8').real.max(),3.0)

    def test_object_array_from_list(self, level=rlevel):
        """Ticket #270"""
        a = np.array([1,'A',None])

    def test_multiple_assign(self, level=rlevel):
        """Ticket #273"""
        a = np.zeros((3,1),int)
        a[[1,2]] = 1

    def test_empty_array_type(self, level=rlevel):
        assert_equal(np.array([]).dtype, np.zeros(0).dtype)

    def test_void_copyswap(self, level=rlevel):
        dt = np.dtype([('one', '<i4'),('two', '<i4')])
        x = np.array((1,2), dtype=dt)
        x = x.byteswap()
        assert_(x['one'] > 1 and x['two'] > 2)

    def test_method_args(self, level=rlevel):
        # Make sure methods and functions have same default axis
        # keyword and arguments
        funcs1= ['argmax', 'argmin', 'sum', ('product', 'prod'),
                 ('sometrue', 'any'),
                 ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
                 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
                 'round', 'min', 'max', 'argsort', 'sort']
        funcs2 = ['compress', 'take', 'repeat']

        # Tuples pair a deprecated function name with its method name.
        for func in funcs1:
            arr = np.random.rand(8,7)
            arr2 = arr.copy()
            if isinstance(func, tuple):
                func_meth = func[1]
                func = func[0]
            else:
                func_meth = func
            res1 = getattr(arr, func_meth)()
            res2 = getattr(np, func)(arr2)
            if res1 is None:
                # In-place methods (e.g. sort) return None; compare the array.
                assert abs(arr-res2).max() < 1e-8, func
            else:
                assert abs(res1-res2).max() < 1e-8, func

        for func in funcs2:
            arr1 = np.random.rand(8,7)
            arr2 = np.random.rand(8,7)
            res1 = None
            if func == 'compress':
                arr1 = arr1.ravel()
                res1 = getattr(arr2, func)(arr1)
            else:
                arr2 = (15*arr2).astype(int).ravel()
            if res1 is None:
                res1 = getattr(arr1, func)(arr2)
            res2 = getattr(np, func)(arr1, arr2)
            assert abs(res1-res2).max() < 1e-8, func

    def test_mem_lexsort_strings(self, level=rlevel):
        """Ticket #298"""
        lst = ['abc','cde','fgh']
        np.lexsort((lst,))

    def test_fancy_index(self, level=rlevel):
        """Ticket #302"""
        x = np.array([1,2])[np.array([0])]
        assert_equal(x.shape,(1,))

    def test_recarray_copy(self, level=rlevel):
        """Ticket #312"""
        dt = [('x',np.int16),('y',np.float64)]
        ra = np.array([(1,2.3)], dtype=dt)
        rb = np.rec.array(ra, dtype=dt)
        rb['x'] = 2.
        # rec.array must copy, so mutating rb leaves ra untouched.
        assert ra['x'] != rb['x']

    def test_rec_fromarray(self, level=rlevel):
        """Ticket #322"""
        x1 = np.array([[1,2],[3,4],[5,6]])
        x2 = np.array(['a','dd','xyz'])
        x3 = np.array([1.1,2,3])
        np.rec.fromarrays([x1,x2,x3], formats="(2,)i4,a3,f8")

    def test_object_array_assign(self, level=rlevel):
        x = np.empty((2,2),object)
        x.flat[2] = (1,2,3)
        assert_equal(x.flat[2],(1,2,3))

    def test_ndmin_float64(self, level=rlevel):
        """Ticket #324"""
        x = np.array([1,2,3],dtype=np.float64)
        assert_equal(np.array(x,dtype=np.float32,ndmin=2).ndim,2)
        assert_equal(np.array(x,dtype=np.float64,ndmin=2).ndim,2)

    def test_mem_axis_minimization(self, level=rlevel):
        """Ticket #327"""
        data = np.arange(5)
        data = np.add.outer(data,data)

    def test_mem_float_imag(self, level=rlevel):
        """Ticket #330"""
        np.float64(1.0).imag

    def test_dtype_tuple(self, level=rlevel):
        """Ticket #334"""
        assert np.dtype('i4') == np.dtype(('i4',()))

    def test_dtype_posttuple(self, level=rlevel):
        """Ticket #335"""
        np.dtype([('col1', '()i4')])

    def test_numeric_carray_compare(self, level=rlevel):
        """Ticket #341"""
        assert_equal(np.array(['X'], 'c'), asbytes('X'))

    def test_string_array_size(self, level=rlevel):
        """Ticket #342"""
        self.assertRaises(ValueError,
                          np.array,[['X'],['X','X','X']],'|S1')

    def test_dtype_repr(self, level=rlevel):
        """Ticket #344"""
        dt1=np.dtype(('uint32', 2))
        dt2=np.dtype(('uint32', (2,)))
        assert_equal(dt1.__repr__(), dt2.__repr__())

    def test_reshape_order(self, level=rlevel):
        """Make sure reshape order works."""
        a = np.arange(6).reshape(2,3,order='F')
        assert_equal(a,[[0,2,4],[1,3,5]])
        a = np.array([[1,2],[3,4],[5,6],[7,8]])
        b = a[:,1]
        assert_equal(b.reshape(2,2,order='F'), [[2,6],[4,8]])
    def test_repeat_discont(self, level=rlevel):
        """Ticket #352"""
        a = np.arange(12).reshape(4,3)[:,2]
        assert_equal(a.repeat(3), [2,2,2,5,5,5,8,8,8,11,11,11])

    def test_array_index(self, level=rlevel):
        """Make sure optimization is not called in this case."""
        a = np.array([1,2,3])
        a2 = np.array([[1,2,3]])
        assert_equal(a[np.where(a==3)], a2[np.where(a2==3)])

    def test_object_argmax(self, level=rlevel):
        a = np.array([1,2,3],dtype=object)
        assert a.argmax() == 2

    def test_recarray_fields(self, level=rlevel):
        """Ticket #372"""
        dt0 = np.dtype([('f0','i4'),('f1','i4')])
        dt1 = np.dtype([('f0','i8'),('f1','i8')])
        for a in [np.array([(1,2),(3,4)],"i4,i4"),
                  np.rec.array([(1,2),(3,4)],"i4,i4"),
                  np.rec.array([(1,2),(3,4)]),
                  np.rec.fromarrays([(1,2),(3,4)],"i4,i4"),
                  np.rec.fromarrays([(1,2),(3,4)])]:
            assert_(a.dtype in [dt0,dt1])

    def test_random_shuffle(self, level=rlevel):
        """Ticket #374"""
        a = np.arange(5).reshape((5,1))
        b = a.copy()
        np.random.shuffle(b)
        assert_equal(np.sort(b, axis=0),a)

    def test_refcount_vdot(self, level=rlevel):
        """Changeset #3443"""
        _assert_valid_refcount(np.vdot)

    def test_startswith(self, level=rlevel):
        ca = np.char.array(['Hi','There'])
        assert_equal(ca.startswith('H'),[True,False])

    def test_noncommutative_reduce_accumulate(self, level=rlevel):
        """Ticket #413"""
        tosubtract = np.arange(5)
        todivide = np.array([2.0, 0.5, 0.25])
        assert_equal(np.subtract.reduce(tosubtract), -10)
        assert_equal(np.divide.reduce(todivide), 16.0)
        assert_array_equal(np.subtract.accumulate(tosubtract),
                           np.array([0, -1, -3, -6, -10]))
        assert_array_equal(np.divide.accumulate(todivide),
                           np.array([2., 4., 16.]))

    def test_convolve_empty(self, level=rlevel):
        """Convolve should raise an error for empty input array."""
        self.assertRaises(ValueError,np.convolve,[],[1])
        self.assertRaises(ValueError,np.convolve,[1],[])

    def test_multidim_byteswap(self, level=rlevel):
        """Ticket #449"""
        r=np.array([(1,(0,1,2))], dtype="i2,3i2")
        assert_array_equal(r.byteswap(),
                           np.array([(256,(0,256,512))],r.dtype))

    def test_string_NULL(self, level=rlevel):
        """Changeset 3557"""
        assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
                     'a\x00\x0b\x0c')

    def test_junk_in_string_fields_of_recarray(self, level=rlevel):
        """Ticket #483"""
        r = np.array([[asbytes('abc')]], dtype=[('var1', '|S20')])
        assert asbytes(r['var1'][0][0]) == asbytes('abc')

    def test_take_output(self, level=rlevel):
        """Ensure that 'take' honours output parameter."""
        x = np.arange(12).reshape((3,4))
        a = np.take(x,[0,2],axis=1)
        b = np.zeros_like(a)
        np.take(x,[0,2],axis=1,out=b)
        assert_array_equal(a,b)

    def test_array_str_64bit(self, level=rlevel):
        """Ticket #501"""
        s = np.array([1, np.nan],dtype=np.float64)
        # Stringifying nan must not trip FP error flags.
        errstate = np.seterr(all='raise')
        try:
            sstr = np.array_str(s)
        finally:
            np.seterr(**errstate)

    def test_frompyfunc_endian(self, level=rlevel):
        """Ticket #503"""
        from math import radians
        uradians = np.frompyfunc(radians, 1, 1)
        big_endian = np.array([83.4, 83.5], dtype='>f8')
        little_endian = np.array([83.4, 83.5], dtype='<f8')
        assert_almost_equal(uradians(big_endian).astype(float),
                            uradians(little_endian).astype(float))

    def test_mem_string_arr(self, level=rlevel):
        """Ticket #514"""
        s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
        t = []
        np.hstack((t, s ))

    def test_arr_transpose(self, level=rlevel):
        """Ticket #516"""
        x = np.random.rand(*(2,)*16)
        y = x.transpose(range(16))

    def test_string_mergesort(self, level=rlevel):
        """Ticket #540"""
        x = np.array(['a']*32)
        assert_array_equal(x.argsort(kind='m'), np.arange(32))

    def test_argmax_byteorder(self, level=rlevel):
        """Ticket #546"""
        a = np.arange(3, dtype='>f')
        assert a[a.argmax()] == a.max()

    def test_rand_seed(self, level=rlevel):
        """Ticket #555"""
        for l in np.arange(4):
            np.random.seed(l)

    def test_mem_deallocation_leak(self, level=rlevel):
        """Ticket #562"""
        a = np.zeros(5,dtype=float)
        b = np.array(a,dtype=float)
        del a, b

    def test_mem_on_invalid_dtype(self):
        "Ticket #583"
        self.assertRaises(ValueError, np.fromiter, [['12',''],['13','']], str)

    def test_dot_negative_stride(self, level=rlevel):
        """Ticket #588"""
        x = np.array([[1,5,25,125.,625]])
        y = np.array([[20.],[160.],[640.],[1280.],[1024.]])
        z = y[::-1].copy()
        y2 = y[::-1]
        assert_equal(np.dot(x,z),np.dot(x,y2))

    def test_object_casting(self, level=rlevel):
        # This used to trigger the object-type version of
        # the bitwise_or operation, because float64 -> object
        # casting succeeds
        def rs():
            x = np.ones([484,286])
            y = np.zeros([484,286])
            x |= y
        self.assertRaises(TypeError,rs)
    def test_unicode_scalar(self, level=rlevel):
        """Ticket #600"""
        import cPickle
        x = np.array(["DROND", "DROND1"], dtype="U6")
        el = x[1]
        new = cPickle.loads(cPickle.dumps(el))
        assert_equal(new, el)

    def test_arange_non_native_dtype(self, level=rlevel):
        """Ticket #616"""
        for T in ('>f4','<f4'):
            dt = np.dtype(T)
            assert_equal(np.arange(0,dtype=dt).dtype,dt)
            assert_equal(np.arange(0.5,dtype=dt).dtype,dt)
            assert_equal(np.arange(5,dtype=dt).dtype,dt)

    def test_bool_indexing_invalid_nr_elements(self, level=rlevel):
        s = np.ones(10,dtype=float)
        x = np.array((15,),dtype=float)
        def ia(x,s): x[(s>0)]=1.0
        self.assertRaises(ValueError,ia,x,s)

    def test_mem_scalar_indexing(self, level=rlevel):
        """Ticket #603"""
        x = np.array([0],dtype=float)
        index = np.array(0,dtype=np.int32)
        x[index]

    def test_binary_repr_0_width(self, level=rlevel):
        assert_equal(np.binary_repr(0,width=3),'000')

    def test_fromstring(self, level=rlevel):
        assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
                     [12,9,9])

    def test_searchsorted_variable_length(self, level=rlevel):
        x = np.array(['a','aa','b'])
        y = np.array(['d','e'])
        assert_equal(x.searchsorted(y), [3,3])

    def test_string_argsort_with_zeros(self, level=rlevel):
        """Check argsort for strings containing zeros."""
        x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
        assert_array_equal(x.argsort(kind='m'), np.array([1,0]))
        assert_array_equal(x.argsort(kind='q'), np.array([1,0]))

    def test_string_sort_with_zeros(self, level=rlevel):
        """Check sort for strings containing zeros."""
        x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
        y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
        assert_array_equal(np.sort(x, kind="q"), y)

    def test_copy_detection_zero_dim(self, level=rlevel):
        """Ticket #658"""
        np.indices((0,3,4)).T.reshape(-1,3)

    def test_flat_byteorder(self, level=rlevel):
        """Ticket #657"""
        x = np.arange(10)
        assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
        assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))

    def test_uint64_from_negative(self, level=rlevel) :
        assert_equal(np.uint64(-2), np.uint64(18446744073709551614))

    def test_sign_bit(self, level=rlevel):
        x = np.array([0,-0.0,0])
        assert_equal(str(np.abs(x)),'[ 0. 0. 0.]')

    def test_flat_index_byteswap(self, level=rlevel):
        for dt in (np.dtype('<i4'),np.dtype('>i4')):
            x = np.array([-1,0,1],dtype=dt)
            assert_equal(x.flat[0].dtype, x[0].dtype)

    def test_copy_detection_corner_case(self, level=rlevel):
        """Ticket #658"""
        np.indices((0,3,4)).T.reshape(-1,3)

    def test_copy_detection_corner_case2(self, level=rlevel):
        """Ticket #771: strides are not set correctly when reshaping 0-sized
        arrays"""
        b = np.indices((0,3,4)).T.reshape(-1,3)
        assert_equal(b.strides, (3 * b.itemsize, b.itemsize))

    def test_object_array_refcounting(self, level=rlevel):
        """Ticket #633"""
        if not hasattr(sys, 'getrefcount'):
            return

        # NB. this is probably CPython-specific
        cnt = sys.getrefcount

        a = object()
        b = object()
        c = object()

        cnt0_a = cnt(a)
        cnt0_b = cnt(b)
        cnt0_c = cnt(c)

        # -- 0d -> 1d broadcasted slice assignment
        arr = np.zeros(5, dtype=np.object_)
        arr[:] = a
        assert_equal(cnt(a), cnt0_a + 5)
        arr[:] = b
        assert_equal(cnt(a), cnt0_a)
        assert_equal(cnt(b), cnt0_b + 5)
        arr[:2] = c
        assert_equal(cnt(b), cnt0_b + 3)
        assert_equal(cnt(c), cnt0_c + 2)
        del arr

        # -- 1d -> 2d broadcasted slice assignment
        arr = np.zeros((5, 2), dtype=np.object_)
        arr0 = np.zeros(2, dtype=np.object_)
        arr0[0] = a
        assert cnt(a) == cnt0_a + 1
        arr0[1] = b
        assert cnt(b) == cnt0_b + 1
        arr[:,:] = arr0
        assert cnt(a) == cnt0_a + 6
        assert cnt(b) == cnt0_b + 6
        arr[:,0] = None
        assert cnt(a) == cnt0_a + 1
        del arr, arr0

        # -- 2d copying + flattening
        arr = np.zeros((5, 2), dtype=np.object_)
        arr[:,0] = a
        arr[:,1] = b
        assert cnt(a) == cnt0_a + 5
        assert cnt(b) == cnt0_b + 5
        arr2 = arr.copy()
        assert cnt(a) == cnt0_a + 10
        assert cnt(b) == cnt0_b + 10
        arr2 = arr[:,0].copy()
        assert cnt(a) == cnt0_a + 10
        assert cnt(b) == cnt0_b + 5
        arr2 = arr.flatten()
        assert cnt(a) == cnt0_a + 10
        assert cnt(b) == cnt0_b + 10
        del arr, arr2

        # -- concatenate, repeat, take, choose
        arr1 = np.zeros((5, 1), dtype=np.object_)
        arr2 = np.zeros((5, 1), dtype=np.object_)
        arr1[...] = a
        arr2[...] = b
        assert cnt(a) == cnt0_a + 5
        assert cnt(b) == cnt0_b + 5
        arr3 = np.concatenate((arr1, arr2))
        assert cnt(a) == cnt0_a + 5 + 5
        assert cnt(b) == cnt0_b + 5 + 5
        arr3 = arr1.repeat(3, axis=0)
        assert cnt(a) == cnt0_a + 5 + 3*5
        arr3 = arr1.take([1,2,3], axis=0)
        assert cnt(a) == cnt0_a + 5 + 3
        x = np.array([[0],[1],[0],[1],[1]], int)
        arr3 = x.choose(arr1, arr2)
        assert cnt(a) == cnt0_a + 5 + 2
        assert cnt(b) == cnt0_b + 5 + 3
    def test_mem_custom_float_to_array(self, level=rlevel):
        """Ticket 702"""
        class MyFloat:
            def __float__(self):
                return 1.0

        tmp = np.atleast_1d([MyFloat()])
        tmp2 = tmp.astype(float)

    def test_object_array_refcount_self_assign(self, level=rlevel):
        """Ticket #711"""
        class VictimObject(object):
            deleted = False
            def __del__(self):
                self.deleted = True
        d = VictimObject()
        arr = np.zeros(5, dtype=np.object_)
        arr[:] = d
        del d
        arr[:] = arr # refcount of 'd' might hit zero here
        assert not arr[0].deleted
        arr[:] = arr # trying to induce a segfault by doing it again...
        assert not arr[0].deleted

    def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
        x = [1,2,3]
        self.assertRaises(ValueError,
                          np.fromiter, [xi for xi in x], dtype='S')

    def test_reduce_big_object_array(self, level=rlevel):
        """Ticket #713"""
        # Shrink the buffer so the reduction has to span buffer boundaries.
        oldsize = np.setbufsize(10*16)
        a = np.array([None]*161, object)
        assert not np.any(a)
        np.setbufsize(oldsize)

    def test_mem_0d_array_index(self, level=rlevel):
        """Ticket #714"""
        np.zeros(10)[np.array(0)]

    def test_floats_from_string(self, level=rlevel):
        """Ticket #640, floats from string"""
        fsingle = np.single('1.234')
        fdouble = np.double('1.234')
        flongdouble = np.longdouble('1.234')
        assert_almost_equal(fsingle, 1.234)
        assert_almost_equal(fdouble, 1.234)
        assert_almost_equal(flongdouble, 1.234)

    def test_complex_dtype_printing(self, level=rlevel):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(str(dt),
                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])]")

    def test_nonnative_endian_fill(self, level=rlevel):
        """ Non-native endian arrays were incorrectly filled with scalars before
        r5034.
        """
        if sys.byteorder == 'little':
            dtype = np.dtype('>i4')
        else:
            dtype = np.dtype('<i4')
        x = np.empty([1], dtype=dtype)
        x.fill(1)
        assert_equal(x, np.array([1], dtype=dtype))

    def test_dot_alignment_sse2(self, level=rlevel):
        """Test for ticket #551, changeset r5140"""
        x = np.zeros((30,40))
        y = pickle.loads(pickle.dumps(x))
        # y is now typically not aligned on a 8-byte boundary
        z = np.ones((1, y.shape[0]))
        # This shouldn't cause a segmentation fault:
        np.dot(z, y)
def test_astype_copy(self, level=rlevel):
"""Ticket #788, changeset r5155"""
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
data_dir = path.join(path.dirname(__file__), 'data')
filename = path.join(data_dir, "astype_copy.pkl")
if sys.version_info[0] >= 3:
xp = pickle.load(open(filename, 'rb'), encoding='latin1')
else:
xp = pickle.load(open(filename))
xpd = xp.astype(np.float64)
assert (xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0])
def test_compress_small_type(self, level=rlevel):
"""Ticket #789, changeset 5217.
"""
# compress with out argument segfaulted if cannot cast safely
import numpy as np
a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype = np.single)
try:
a.compress([True, False], axis = 1, out = b)
raise AssertionError("compress with an out which cannot be " \
"safely casted should not return "\
"successfully")
except TypeError:
pass
def test_attributes(self, level=rlevel):
"""Ticket #791
"""
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '')
dat = TestArray([[1,2,3,4],[5,6,7,8]],'jubba')
assert_(dat.info == 'jubba')
dat.resize((4,2))
assert_(dat.info == 'jubba')
dat.sort()
assert_(dat.info == 'jubba')
dat.fill(2)
assert_(dat.info == 'jubba')
dat.put([2,3,4],[6,3,4])
assert_(dat.info == 'jubba')
dat.setfield(4, np.int32,0)
assert_(dat.info == 'jubba')
dat.setflags()
assert_(dat.info == 'jubba')
assert_(dat.all(1).info == 'jubba')
assert_(dat.any(1).info == 'jubba')
assert_(dat.argmax(1).info == 'jubba')
assert_(dat.argmin(1).info == 'jubba')
assert_(dat.argsort(1).info == 'jubba')
assert_(dat.astype(TestArray).info == 'jubba')
assert_(dat.byteswap().info == 'jubba')
assert_(dat.clip(2,7).info == 'jubba')
assert_(dat.compress([0,1,1]).info == 'jubba')
assert_(dat.conj().info == 'jubba')
assert_(dat.conjugate().info == 'jubba')
assert_(dat.copy().info == 'jubba')
dat2 = TestArray([2, 3, 1, 0],'jubba')
choices = [[0, 1, 2, 3], [10, 11, 12, 13],
[20, 21, 22, 23], [30, 31, 32, 33]]
assert_(dat2.choose(choices).info == 'jubba')
assert_(dat.cumprod(1).info == 'jubba')
assert_(dat.cumsum(1).info == 'jubba')
assert_(dat.diagonal().info == 'jubba')
assert_(dat.flatten().info == 'jubba')
assert_(dat.getfield(np.int32,0).info == 'jubba')
assert_(dat.imag.info == 'jubba')
assert_(dat.max(1).info == 'jubba')
assert_(dat.mean(1).info == 'jubba')
assert_(dat.min(1).info == 'jubba')
assert_(dat.newbyteorder().info == 'jubba')
assert_(dat.nonzero()[0].info == 'jubba')
assert_(dat.nonzero()[1].info == 'jubba')
assert_(dat.prod(1).info == 'jubba')
assert_(dat.ptp(1).info == 'jubba')
assert_(dat.ravel().info == 'jubba')
assert_(dat.real.info == 'jubba')
assert_(dat.repeat(2).info == 'jubba')
assert_(dat.reshape((2,4)).info == 'jubba')
assert_(dat.round().info == 'jubba')
assert_(dat.squeeze().info == 'jubba')
assert_(dat.std(1).info == 'jubba')
assert_(dat.sum(1).info == 'jubba')
assert_(dat.swapaxes(0,1).info == 'jubba')
assert_(dat.take([2,3,5]).info == 'jubba')
assert_(dat.transpose().info == 'jubba')
assert_(dat.T.info == 'jubba')
assert_(dat.var(1).info == 'jubba')
assert_(dat.view(TestArray).info == 'jubba')
def test_recarray_tolist(self, level=rlevel):
"""Ticket #793, changeset r5215
"""
# Comparisons fail for NaN, so we can't use random memory
# for the test.
buf = np.zeros(40, dtype=np.int8)
a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
b = a.tolist()
assert_( a[0].tolist() == b[0])
assert_( a[1].tolist() == b[1])
def test_char_array_creation(self, level=rlevel):
a = np.array('123', dtype='c')
b = np.array(asbytes_nested(['1','2','3']))
assert_equal(a,b)
def test_unaligned_unicode_access(self, level=rlevel) :
"""Ticket #825"""
for i in range(1,9) :
msg = 'unicode offset: %d chars'%i
t = np.dtype([('a','S%d'%i),('b','U2')])
x = np.array([(asbytes('a'),u'b')], dtype=t)
if sys.version_info[0] >= 3:
assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
def test_sign_for_complex_nan(self, level=rlevel):
"""Ticket 794."""
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
have = np.sign(C)
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
def test_for_equal_names(self, level=rlevel):
"""Ticket #674"""
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
b = list(a.dtype.names)
b[0] = "notfoo"
a.dtype.names = b
assert a.dtype.names[0] == "notfoo"
assert a.dtype.names[1] == "bar"
def test_for_object_scalar_creation(self, level=rlevel):
"""Ticket #816"""
a = np.object_()
b = np.object_(3)
b2 = np.object_(3.0)
c = np.object_([4,5])
d = np.object_([None, {}, []])
assert a is None
assert type(b) is int
assert type(b2) is float
assert type(c) is np.ndarray
assert c.dtype == object
assert d.dtype == object
def test_array_resize_method_system_error(self):
"""Ticket #840 - order should be an invalid keyword."""
x = np.array([[0,1],[2,3]])
self.assertRaises(TypeError, x.resize, (2,2), order='C')
def test_for_zero_length_in_choose(self, level=rlevel):
"Ticket #882"
a = np.array(1)
self.assertRaises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
def test_errobj_reference_leak(self, level=rlevel):
"""Ticket #955"""
old_err = np.seterr(all="ignore")
try:
z = int(0)
p = np.int32(-1)
gc.collect()
n_before = len(gc.get_objects())
z**p # this shouldn't leak a reference to errobj
gc.collect()
n_after = len(gc.get_objects())
assert n_before >= n_after, (n_before, n_after)
finally:
np.seterr(**old_err)
def test_void_scalar_with_titles(self, level=rlevel):
"""No ticket"""
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
arr = np.array(data, dtype=dtype1)
assert arr[0][0] == 'john'
assert arr[0][1] == 4
def test_blasdot_uninitialized_memory(self):
"""Ticket #950"""
for m in [0, 1, 2]:
for n in [0, 1, 2]:
for k in xrange(3):
# Try to ensure that x->data contains non-zero floats
x = np.array([123456789e199], dtype=np.float64)
x.resize((m, 0))
y = np.array([123456789e199], dtype=np.float64)
y.resize((0, n))
# `dot` should just return zero (m,n) matrix
z = np.dot(x, y)
assert np.all(z == 0)
assert z.shape == (m, n)
def test_zeros(self):
"""Regression test for #1061."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed dimension exceeded'
try:
np.empty(sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
"""Regression test for #1062."""
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
good = 'Maximum allowed size exceeded'
try:
a = np.arange(sz)
self.assertTrue(np.size == sz)
except ValueError, e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
except Exception, e:
self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
"""Ticket #1058"""
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_array_from_sequence_scalar_array(self):
"""Ticket #1078: segfaults when creating an array with a sequence of 0d
arrays."""
a = np.array((np.ones(2), np.array(2)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], np.ones(2))
assert_equal(a[1], np.array(2))
a = np.array(((1,), np.array(1)))
assert_equal(a.shape, (2,))
assert_equal(a.dtype, np.dtype(object))
assert_equal(a[0], (1,))
assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
"""Ticket #1081: weird array with strange input..."""
t = np.array([np.array([]), np.array(0, object)])
assert_equal(t.shape, (2,))
assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
"""Ticket #1080."""
assert_raises(ValueError, np.zeros, [975]*7, np.int8)
assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
"""Ticket #1106."""
dt = np.dtype([('f1', np.uint)])
assert_raises(KeyError, dt.__getitem__, "f2")
assert_raises(IndexError, dt.__getitem__, 1)
assert_raises(ValueError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
"""Ticket #1217, don't segfault."""
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
assert_equal(i, np.arange(100, dtype=np.int))
def test_object_array_to_fixed_string(self):
"""Ticket #1235."""
a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
b = np.array(a, dtype=(np.str_, 8))
assert_equal(a, b)
c = np.array(a, dtype=(np.str_, 5))
assert_equal(c, np.array(['abcde', 'ijklm']))
d = np.array(a, dtype=(np.str_, 12))
assert_equal(a, d)
e = np.empty((2, ), dtype=(np.str_, 8))
e[:] = a[:]
assert_equal(a, e)
def test_unicode_to_string_cast(self):
"""Ticket #1240."""
a = np.array([[u'abc', u'\u03a3'], [u'asdf', u'erw']], dtype='U')
def fail():
b = np.array(a, 'S4')
self.assertRaises(UnicodeEncodeError, fail)
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
assert a.itemsize == 16
a = np.array([u'123', '1234'])
assert a.itemsize == 16
a = np.array(['1234', u'123', '12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'12345'])
assert a.itemsize == 20
a = np.array([u'123', '1234', u'1234'])
assert a.itemsize == 16
def test_misaligned_objects_segfault(self):
"""Ticket #1198 and #1267"""
a1 = np.zeros((10,), dtype='O,c')
a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
a1['f0'] = a2
r = repr(a1)
np.argmax(a1['f0'])
a1['f0'][1] = "FOO"
a1['f0'] = "FOO"
a3 = np.array(a1['f0'], dtype='S')
np.nonzero(a1['f0'])
a1.sort()
a4 = copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
"""Ticket #1267"""
s1 = np.array(('a', 'Foo'), dtype='c,O')
s2 = np.array(('b', 'Bar'), dtype='c,O')
s1['f1'] = s2['f1']
s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
"""Ticket #1267"""
# This didn't require a fix, but it's worth testing anyway, because
# it may fail if .dot stops enforcing the arrays to be BEHAVED
a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
"""Ticket #1259"""
z = np.array([-1j], '<c8')
x = z[0] # always native-endian
y = x.byteswap()
if x.dtype.byteorder == z.dtype.byteorder:
# little-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='>c8'))
else:
# big-endian machine
assert_equal(x, np.fromstring(y.tostring(), dtype='<c8'))
def test_structured_arrays_with_objects1(self):
"""Ticket #1299"""
stra = 'aaaa'
strb = 'bbbb'
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert x[0,1] == x[0,0]
def test_structured_arrays_with_objects2(self):
"""Ticket #1299 second test"""
stra = 'aaaa'
strb = 'bbbb'
numb = sys.getrefcount(strb)
numa = sys.getrefcount(stra)
x = np.array([[(0,stra),(1,strb)]], 'i8,O')
x[x.nonzero()] = x.ravel()[:1]
assert sys.getrefcount(strb) == numb
assert sys.getrefcount(stra) == numa + 2
def test_duplicate_title_and_name(self):
"""Ticket #1254"""
def func():
x = np.dtype([(('a', 'a'), 'i'), ('b', 'i')])
self.assertRaises(ValueError, func)
def test_signed_integer_division_overflow(self):
"""Ticket #1317."""
def test_type(t):
min = np.array([np.iinfo(t).min])
min //= -1
old_err = np.seterr(divide="ignore")
try:
for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
test_type(t)
finally:
np.seterr(**old_err)
def test_buffer_hashlib(self):
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
x = np.array([1,2,3], dtype=np.dtype('<i4'))
assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')
def test_numeric_handleError(self):
"""Ticket #1405"""
from numpy import numarray
# Just make sure this doesn't throw an exception
numarray.handleError(0, "")
def test_0d_string_scalar(self):
# Bug #1436; the following should succeed
np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self, level=rlevel):
a = np.fromiter(range(10), dtype='b')
b = np.fromiter(range(10), dtype='B')
assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9]))
assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9]))
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
np.fromstring(asbytes('aa, aa, 1.0'), sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
if (issubclass(x, np.number)
and not issubclass(x, np.timeinteger))]
a = np.array([], dtypes[0])
failures = []
for x in dtypes:
b = a.astype(x)
for y in dtypes:
c = a.astype(y)
try:
np.dot(b, c)
except TypeError, e:
failures.append((x, y))
if failures:
raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
x = np.finfo(np.float32)
for name in 'eps epsneg max min resolution tiny'.split():
assert_equal(type(getattr(x, name)), np.float32,
err_msg=name)
def test_ticket_1434(self):
# Check that the out= argument in var and std has an effect
data = np.array(((1,2,3),(4,5,6),(7,8,9)))
out = np.zeros((3,))
ret = data.var(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.var(axis=1))
ret = data.std(axis=1, out=out)
assert_(ret is out)
assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
class Subclass(np.ndarray):
def __new__(cls,i):
return np.ones((i,)).view(cls)
x = Subclass(5)
x[(0,)] = 2 # shouldn't raise an exception
assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
# ticket #1548
class Subclass(np.ndarray):
pass
x = np.array([1,2,3]).view(Subclass)
y = np.add(x, x, x)
assert_equal(id(x), id(y))
def test_take_refcount(self):
# ticket #939
a = np.arange(16, dtype=np.float)
a.shape = (4,4)
lut = np.ones((5 + 3, 4), np.float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
lut.take(a, axis=0, mode='clip', out=rgba)
except TypeError:
pass
c2 = sys.getrefcount(rgba)
assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
# On Python 3, tofile/fromfile used to get (#1610) the Python
# file handle out of sync
f0 = tempfile.NamedTemporaryFile()
f = f0.file
f.write(np.arange(255, dtype='u1').tostring())
f.seek(20)
ret = np.fromfile(f, count=4, dtype='u1')
assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
assert_equal(f.tell(), 24)
f.seek(40)
np.array([1, 2, 3], dtype='u1').tofile(f)
assert_equal(f.tell(), 43)
f.seek(40)
data = f.read(3)
assert_equal(data, asbytes("\x01\x02\x03"))
f.seek(80)
f.read(4)
data = np.fromfile(f, dtype='u1', count=4)
assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
f.close()
def test_complex_scalar_warning(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_warns(np.ComplexWarning, float, x)
ctx = WarningManager()
ctx.__enter__()
warnings.simplefilter('ignore')
assert_equal(float(x), float(x.real))
ctx.__exit__()
def test_complex_scalar_complex_cast(self):
for tp in [np.csingle, np.cdouble, np.clongdouble]:
x = tp(1+2j)
assert_equal(complex(x), 1+2j)
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
ra = np.fromiter(((i*3, i*2) for i in xrange(10)), dtype='i8,f8')
ra.dtype.names = ('f1', 'f2')
rep = repr(ra) # should not cause a segmentation fault
assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
# From e-mail thread "__eq__ with str and object" (Keith Goodman)
a1 = np.array(['a', 'b'], dtype=object)
a2 = np.array(['a', 'c'])
assert_array_equal(a1 == a2, [True, False])
assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
a.dtype = np.float32
assert_equal(a.nonzero()[0], [1])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
# Ticket #1695
assert_(np.find_common_type([],['?','?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
assert_equal(np.dtype('S10').itemsize, 10)
A = np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)
# This was throwing an exception because in ctors.c,
# discover_itemsize was calling PyObject_Length without checking
# the return code. This failed to get the length of the number 2,
# and the exception hung around until something checked
# PyErr_Occurred() and returned an error.
assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
a = np.array(1.0, dtype=np.float32)
b = np.array(1.0, dtype=np.float64)
c = np.array(1.0, dtype=np.float32)
np.add(a, b, out=c)
assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
# Array scalars are both C and Fortran contiguous
assert_(np.array(1.0).flags.c_contiguous)
assert_(np.array(1.0).flags.f_contiguous)
assert_(np.array(np.float32(1.0)).flags.c_contiguous)
assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(TypeError, int, a)
assert_raises(TypeError, long, a)
assert_raises(TypeError, float, a)
assert_raises(TypeError, oct, a)
assert_raises(TypeError, hex, a)
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1,0]), [])
def test_objectarray_setfield(self):
# Setfield directly manipulates the raw array data,
# so is invalid for object arrays.
x = np.array([1,2,3], dtype=object)
assert_raises(RuntimeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = asbytes("hello1")
s2 = asbytes("hello2")
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
@dec.knownfailureif(sys.version_info[0] >= 3,
"a.dtype is U5 for Py 3.x. Knownfail for 1.6.x")
def test_string_astype(self):
"Ticket #1748"
s1 = asbytes('black')
s2 = asbytes('white')
s3 = asbytes('other')
a = np.array([[s1],[s2],[s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype('str')
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
"""Ticket #1756 """
s = asbytes('0123456789abcdef')
a = np.array([s]*5)
for i in range(1,17):
a1 = np.array(a, "|S%d"%i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"Ticket #1760"
r=np.fromstring('abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except:
raise AssertionError
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
AlexProfi/django-cms | cms/tests/test_signals.py | 23 | 4283 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.test.utils import override_settings
from cms.api import create_page
from cms.models import UrlconfRevision
from cms.signals import urls_need_reloading
from cms.test_utils.testcases import CMSTestCase
APP_NAME = 'SampleApp'
class SignalTester(object):
def __init__(self):
self.call_count = 0
self.calls = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.calls.append((args, kwargs))
@contextmanager
def signal_tester(signal):
env = SignalTester()
signal.connect(env, weak=True)
try:
yield env
finally:
signal.disconnect(env, weak=True)
class SignalTests(TestCase):
def test_urls_need_reloading_signal_create(self):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_delete(self):
with signal_tester(urls_need_reloading) as env:
self.client.get('/')
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
page.delete()
self.client.get('/')
self.assertEqual(env.call_count, 1)
def test_urls_need_reloading_signal_change_slug(self):
with signal_tester(urls_need_reloading) as env:
self.assertEqual(env.call_count, 0)
page = create_page(
"apphooked-page",
"nav_playground.html",
"en",
published=True,
apphook="SampleApp",
apphook_namespace="test"
)
self.client.get('/')
self.assertEqual(env.call_count, 1)
title = page.title_set.get(language="en")
title.slug += 'test'
title.save()
page.publish('en')
self.client.get('/')
self.assertEqual(env.call_count, 2)
@override_settings(
MIDDLEWARE_CLASSES=[
'cms.middleware.utils.ApphookReloadMiddleware'
] + settings.MIDDLEWARE_CLASSES,
)
class ApphooksReloadTests(CMSTestCase):
def test_urls_reloaded(self):
"""
Tests that URLs are automatically reloaded when the ApphookReload
middleware is installed.
"""
#
# Sets up an apphook'ed page, but does not yet publish it.
#
superuser = get_user_model().objects.create_superuser(
'admin', 'admin@admin.com', 'admin')
page = create_page("home", "nav_playground.html", "en",
created_by=superuser)
page.publish('en')
app_page = create_page("app_page", "nav_playground.html", "en",
created_by=superuser, parent=page,
published=False, apphook="SampleApp")
self.client.get('/') # Required to invoke the middleware
#
# Gets the current urls revision for testing against later.
#
current_revision, _ = UrlconfRevision.get_or_create_revision()
#
# Publishes the apphook. This is one of many ways to trigger the
# firing of the signal. The tests above test some of the other ways
# already.
#
app_page.publish('en')
self.client.get('/') # Required to invoke the middleware
# And, this should result in a the updating of the UrlconfRevision
new_revision, _ = UrlconfRevision.get_or_create_revision()
self.assertNotEquals(current_revision, new_revision)
| bsd-3-clause |
yograterol/django | django/contrib/staticfiles/management/commands/findstatic.py | 463 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
label = 'static file'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--first', action='store_false', dest='all',
default=True,
help="Only return the first match for each static file.")
def handle_label(self, path, **options):
verbosity = options['verbosity']
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = ("Looking in the following locations:\n %s" %
"\n ".join(force_text(location)
for location in finders.searched_locations))
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s\n%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
| bsd-3-clause |
kickstandproject/wildcard | wildcard/openstack/common/fileutils.py | 1 | 2986 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
from wildcard.openstack.common import excutils
from wildcard.openstack.common.gettextutils import _ # noqa
from wildcard.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh
or not.
"""
global _FILE_CACHE
if force_reload and filename in _FILE_CACHE:
del _FILE_CACHE[filename]
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_if_exists(path):
"""Delete a file, but ignore file not found error.
:param path: File to delete
"""
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
| apache-2.0 |
dmsimard/ansible | lib/ansible/modules/wait_for.py | 4 | 25813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: wait_for
short_description: Waits for a condition before continuing
description:
- You can wait for a set amount of time C(timeout), this is the default if nothing is specified or just C(timeout) is specified.
This does not produce an error.
- Waiting for a port to become available is useful for when services are not immediately available after their init scripts return
which is true of certain Java application servers.
- It is also useful when starting guests with the M(community.libvirt.virt) module and needing to pause until they are ready.
- This module can also be used to wait for a regex match a string to be present in a file.
- In Ansible 1.6 and later, this module can also be used to wait for a file to be available or
absent on the filesystem.
- In Ansible 1.8 and later, this module can also be used to wait for active connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
- For Windows targets, use the M(ansible.windows.win_wait_for) module instead.
version_added: "0.7"
options:
host:
description:
- A resolvable hostname or IP address to wait for.
type: str
default: 127.0.0.1
timeout:
description:
- Maximum number of seconds to wait for, when used with another condition it will force an error.
- When used without other conditions it is equivalent of just sleeping.
type: int
default: 300
connect_timeout:
description:
- Maximum number of seconds to wait for a connection to happen before closing and retrying.
type: int
default: 5
delay:
description:
- Number of seconds to wait before starting to poll.
type: int
default: 0
port:
description:
- Port number to poll.
- C(path) and C(port) are mutually exclusive parameters.
type: int
active_connection_states:
description:
- The list of TCP connection states which are counted as active connections.
type: list
elements: str
default: [ ESTABLISHED, FIN_WAIT1, FIN_WAIT2, SYN_RECV, SYN_SENT, TIME_WAIT ]
version_added: "2.3"
state:
description:
- Either C(present), C(started), or C(stopped), C(absent), or C(drained).
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections.
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,
C(absent) will check that file is absent or removed.
type: str
choices: [ absent, drained, present, started, stopped ]
default: started
path:
description:
- Path to a file on the filesystem that must exist before continuing.
- C(path) and C(port) are mutually exclusive parameters.
type: path
version_added: "1.4"
search_regex:
description:
- Can be used to match a string in either a file or a socket connection.
- Defaults to a multiline regex.
type: str
version_added: "1.4"
exclude_hosts:
description:
- List of hosts or IPs to ignore when looking for active TCP connections for C(drained) state.
type: list
elements: str
version_added: "1.8"
sleep:
description:
- Number of seconds to sleep between checks.
- Before Ansible 2.3 this was hardcoded to 1 second.
type: int
default: 1
version_added: "2.3"
msg:
description:
- This overrides the normal error message from a failure to meet the required conditions.
type: str
version_added: "2.4"
notes:
- The ability to use search_regex with a port connection was added in Ansible 1.7.
- Prior to Ansible 2.4, testing for the absence of a directory or UNIX socket did not work correctly.
- Prior to Ansible 2.4, testing for the presence of a file did not work correctly if the remote user did not have read access to that file.
- Under some circumstances when using mandatory access control, a path may always be treated as being absent even if it exists, but
can't be modified or created by the remote user either.
- When waiting for a path, symbolic links will be followed. Many other modules that manipulate files do not follow symbolic links,
so operations on the path using other modules may not work exactly as expected.
seealso:
- module: ansible.builtin.wait_for_connection
- module: ansible.windows.win_wait_for
- module: community.windows.win_wait_for_process
author:
- Jeroen Hoekx (@jhoekx)
- John Jarvis (@jarv)
- Andrii Radyk (@AnderEnder)
'''
EXAMPLES = r'''
- name: Sleep for 300 seconds and continue with play
wait_for:
timeout: 300
delegate_to: localhost
- name: Wait for port 8000 to become open on the host, don't start checking for 10 seconds
wait_for:
port: 8000
delay: 10
- name: Waits for port 8000 of any IP to close active connections, don't start checking for 10 seconds
wait_for:
host: 0.0.0.0
port: 8000
delay: 10
state: drained
- name: Wait for port 8000 of any IP to close active connections, ignoring connections for specified hosts
wait_for:
host: 0.0.0.0
port: 8000
state: drained
exclude_hosts: 10.2.1.2,10.2.1.3
- name: Wait until the file /tmp/foo is present before continuing
wait_for:
path: /tmp/foo
- name: Wait until the string "completed" is in the file /tmp/foo before continuing
wait_for:
path: /tmp/foo
search_regex: completed
- name: Wait until regex pattern matches in the file /tmp/foo and print the matched group
wait_for:
path: /tmp/foo
search_regex: completed (?P<task>\w+)
register: waitfor
- debug:
msg: Completed {{ waitfor['match_groupdict']['task'] }}
- name: Wait until the lock file is removed
wait_for:
path: /var/lock/file.lock
state: absent
- name: Wait until the process is finished and pid was destroyed
wait_for:
path: /proc/3466/status
state: absent
- name: Output customized message when failed
wait_for:
path: /tmp/foo
state: present
msg: Timeout to find file /tmp/foo
# Do not assume the inventory_hostname is resolvable and delay 10 seconds at start
- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
wait_for:
port: 22
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
connection: local
# Same as above but you normally have ansible_connection set in inventory, which overrides 'connection'
- name: Wait 300 seconds for port 22 to become open and contain "OpenSSH"
wait_for:
port: 22
host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}'
search_regex: OpenSSH
delay: 10
vars:
ansible_connection: local
'''
RETURN = r'''
elapsed:
description: The number of seconds that elapsed while waiting
returned: always
type: int
sample: 23
match_groups:
description: Tuple containing all the subgroups of the match as returned by U(https://docs.python.org/2/library/re.html#re.MatchObject.groups)
returned: always
type: list
sample: ['match 1', 'match 2']
match_groupdict:
description: Dictionary containing all the named subgroups of the match, keyed by the subgroup name,
as returned by U(https://docs.python.org/2/library/re.html#re.MatchObject.groupdict)
returned: always
type: dict
sample:
{
'group': 'match'
}
'''
import binascii
import datetime
import errno
import math
import os
import re
import select
import socket
import time
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils._text import to_native
# psutil is optional at import time: the generic TCPConnectionInfo strategy
# requires it (and fails the module in its __init__ if absent), but the
# Linux procfs strategy does not, so the import error is recorded and
# deferred rather than raised here.
HAS_PSUTIL = False
PSUTIL_IMP_ERR = None
try:
    import psutil

    HAS_PSUTIL = True
    # just because we can import it on Linux doesn't mean we will use it
except ImportError:
    PSUTIL_IMP_ERR = traceback.format_exc()
class TCPConnectionInfo(object):
    """
    This is a generic TCP Connection Info strategy class that relies
    on the psutil module, which is not ideal for targets, but necessary
    for cross platform support.

    A subclass may wish to override some or all of these methods.
        - _get_exclude_ips()
        - get_active_connections()

    All subclasses MUST define platform and distribution (which may be None).
    """
    platform = 'Generic'
    distribution = None

    # Wildcard listen addresses per address family.
    match_all_ips = {
        socket.AF_INET: '0.0.0.0',
        socket.AF_INET6: '::',
    }
    # IPv4 addresses may appear as IPv4-mapped IPv6 (::ffff:a.b.c.d).
    ipv4_mapped_ipv6_address = {
        'prefix': '::ffff',
        'match_all': '::ffff:0.0.0.0'
    }

    def __new__(cls, *args, **kwargs):
        # Dispatch instantiation to the platform-specific subclass
        # (e.g. LinuxTCPConnectionInfo) when one matches this host.
        new_cls = get_platform_subclass(TCPConnectionInfo)
        return super(cls, new_cls).__new__(new_cls)

    def __init__(self, module):
        self.module = module
        # Pre-resolve the target host and excluded hosts to
        # (family, ip) tuples for fast membership tests below.
        self.ips = _convert_host_to_ip(module.params['host'])
        self.port = int(self.module.params['port'])
        self.exclude_ips = self._get_exclude_ips()
        if not HAS_PSUTIL:
            module.fail_json(msg=missing_required_lib('psutil'), exception=PSUTIL_IMP_ERR)

    def _get_exclude_ips(self):
        # Resolve every entry of exclude_hosts to (family, ip) tuples.
        exclude_hosts = self.module.params['exclude_hosts']
        exclude_ips = []
        if exclude_hosts is not None:
            for host in exclude_hosts:
                exclude_ips.extend(_convert_host_to_ip(host))
        return exclude_ips

    def get_active_connections_count(self):
        # Count inet connections on self.port that are in one of the
        # configured active states and not from an excluded remote host.
        active_connections = 0
        for p in psutil.process_iter():
            try:
                # psutil renamed get_connections() to connections(); probe
                # for the old name to support both library versions.
                if hasattr(p, 'get_connections'):
                    connections = p.get_connections(kind='inet')
                else:
                    connections = p.connections(kind='inet')
            except psutil.Error:
                # Process is Zombie or other error state
                continue
            for conn in connections:
                if conn.status not in self.module.params['active_connection_states']:
                    continue
                # Same old-name/new-name probing for address attributes.
                if hasattr(conn, 'local_address'):
                    (local_ip, local_port) = conn.local_address
                else:
                    (local_ip, local_port) = conn.laddr
                if self.port != local_port:
                    continue
                if hasattr(conn, 'remote_address'):
                    (remote_ip, remote_port) = conn.remote_address
                else:
                    (remote_ip, remote_port) = conn.raddr
                if (conn.family, remote_ip) in self.exclude_ips:
                    continue
                # Match the exact local IP, the family wildcard, or the
                # IPv4-mapped-IPv6 wildcard form.
                if any((
                    (conn.family, local_ip) in self.ips,
                    (conn.family, self.match_all_ips[conn.family]) in self.ips,
                    local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
                    (conn.family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
                )):
                    active_connections += 1
        return active_connections
# ===========================================
# Subclass: Linux

class LinuxTCPConnectionInfo(TCPConnectionInfo):
    """
    This is a TCP Connection Info evaluation strategy class
    that utilizes information from Linux's procfs. While less universal,
    does allow Linux targets to not require an additional library.
    """
    platform = 'Linux'
    distribution = None

    source_file = {
        socket.AF_INET: '/proc/net/tcp',
        socket.AF_INET6: '/proc/net/tcp6'
    }
    # Addresses in /proc/net/tcp* are little-endian hex strings.
    match_all_ips = {
        socket.AF_INET: '00000000',
        socket.AF_INET6: '00000000000000000000000000000000',
    }
    ipv4_mapped_ipv6_address = {
        'prefix': '0000000000000000FFFF0000',
        'match_all': '0000000000000000FFFF000000000000'
    }
    # Whitespace-split column indexes within a /proc/net/tcp* row.
    local_address_field = 1
    remote_address_field = 2
    connection_state_field = 3

    def __init__(self, module):
        self.module = module
        self.ips = _convert_host_to_hex(module.params['host'])
        # Ports in procfs are 4-digit uppercase hex.
        self.port = "%0.4X" % int(module.params['port'])
        self.exclude_ips = self._get_exclude_ips()

    def _get_exclude_ips(self):
        """Resolve excluded hosts to (family, hex-ip) tuples."""
        exclude_hosts = self.module.params['exclude_hosts']
        exclude_ips = []
        if exclude_hosts is not None:
            for host in exclude_hosts:
                exclude_ips.extend(_convert_host_to_hex(host))
        return exclude_ips

    def get_active_connections_count(self):
        """Count procfs rows on self.port in an active state from a
        non-excluded remote host."""
        active_connections = 0
        # Hoisted out of the per-row loop: the set of state ids is
        # invariant for the whole scan.
        active_state_ids = [
            get_connection_state_id(_connection_state)
            for _connection_state in self.module.params['active_connection_states']
        ]
        for family in self.source_file.keys():
            if not os.path.isfile(self.source_file[family]):
                continue
            try:
                # Use a context manager so the handle is always closed.
                # The original open()/finally pattern could raise
                # NameError (or close a stale handle) when open() itself
                # failed.
                with open(self.source_file[family]) as f:
                    for tcp_connection in f.readlines():
                        tcp_connection = tcp_connection.strip().split()
                        if tcp_connection[self.local_address_field] == 'local_address':
                            # Skip the header row.
                            continue
                        if tcp_connection[self.connection_state_field] not in active_state_ids:
                            continue
                        (local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
                        if self.port != local_port:
                            continue
                        (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
                        if (family, remote_ip) in self.exclude_ips:
                            continue
                        # Match the exact local IP, the family wildcard,
                        # or the IPv4-mapped-IPv6 wildcard form.
                        if any((
                            (family, local_ip) in self.ips,
                            (family, self.match_all_ips[family]) in self.ips,
                            local_ip.startswith(self.ipv4_mapped_ipv6_address['prefix']) and
                            (family, self.ipv4_mapped_ipv6_address['match_all']) in self.ips,
                        )):
                            active_connections += 1
            except IOError:
                # Unreadable source file: best effort, try the next family.
                continue
        return active_connections
def _convert_host_to_ip(host):
"""
Perform forward DNS resolution on host, IP will give the same IP
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
List of tuples containing address family and IP
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
ips = []
for family, socktype, proto, canonname, sockaddr in addrinfo:
ip = sockaddr[0]
ips.append((family, ip))
if family == socket.AF_INET:
ips.append((socket.AF_INET6, "::ffff:" + ip))
return ips
def _convert_host_to_hex(host):
    """
    Convert the provided host to the format in /proc/net/tcp*

    /proc/net/tcp uses little-endian four byte hex for ipv4
    /proc/net/tcp6 uses little-endian per 4B word for ipv6

    Args:
        host: String with either hostname, IPv4, or IPv6 address

    Returns:
        List of tuples containing address family and the
        little-endian converted host
    """
    if host is None:
        return []
    converted = []
    for family, ip in _convert_host_to_ip(host):
        packed_hex = binascii.b2a_hex(socket.inet_pton(family, ip))
        # Convert each 4-byte word from network to host byte order.
        words = []
        for offset in range(0, len(packed_hex), 8):
            word = socket.ntohl(int(packed_hex[offset:offset + 8], base=16))
            words.append("%08X" % word)
        converted.append((family, "".join(words)))
    return converted
def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def get_connection_state_id(state):
    """Map a TCP state name (e.g. 'ESTABLISHED') to its /proc/net/tcp hex id.

    Raises KeyError for unknown names; callers rely on that to validate
    user-supplied states.
    """
    state_ids = {
        'ESTABLISHED': '01',
        'SYN_SENT': '02',
        'SYN_RECV': '03',
        'FIN_WAIT1': '04',
        'FIN_WAIT2': '05',
        'TIME_WAIT': '06',
    }
    return state_ids[state]
def main():
    """Module entry point: parse parameters, wait for the requested
    condition (timeout sleep, port/path started/stopped, port drained),
    then exit with the elapsed time and any regex match groups."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            timeout=dict(type='int', default=300),
            connect_timeout=dict(type='int', default=5),
            delay=dict(type='int', default=0),
            port=dict(type='int'),
            active_connection_states=dict(type='list', elements='str', default=['ESTABLISHED', 'FIN_WAIT1', 'FIN_WAIT2', 'SYN_RECV', 'SYN_SENT', 'TIME_WAIT']),
            path=dict(type='path'),
            search_regex=dict(type='str'),
            state=dict(type='str', default='started', choices=['absent', 'drained', 'present', 'started', 'stopped']),
            exclude_hosts=dict(type='list', elements='str'),
            sleep=dict(type='int', default=1),
            msg=dict(type='str'),
        ),
    )

    host = module.params['host']
    timeout = module.params['timeout']
    connect_timeout = module.params['connect_timeout']
    delay = module.params['delay']
    port = module.params['port']
    state = module.params['state']
    path = module.params['path']
    search_regex = module.params['search_regex']
    msg = module.params['msg']

    if search_regex is not None:
        compiled_search_re = re.compile(search_regex, re.MULTILINE)
    else:
        compiled_search_re = None

    match_groupdict = {}
    match_groups = ()

    # Validate mutually exclusive / dependent parameter combinations
    # before doing any waiting.
    if port and path:
        module.fail_json(msg="port and path parameter can not both be passed to wait_for", elapsed=0)
    if path and state == 'stopped':
        module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module", elapsed=0)
    if path and state == 'drained':
        module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module", elapsed=0)
    if module.params['exclude_hosts'] is not None and state != 'drained':
        module.fail_json(msg="exclude_hosts should only be with state=drained", elapsed=0)
    for _connection_state in module.params['active_connection_states']:
        try:
            # Raises KeyError for unknown state names.
            get_connection_state_id(_connection_state)
        except Exception:
            module.fail_json(msg="unknown active_connection_state (%s) defined" % _connection_state, elapsed=0)

    start = datetime.datetime.utcnow()

    if delay:
        time.sleep(delay)

    if not port and not path and state != 'drained':
        # No condition given: plain sleep for the full timeout.
        time.sleep(timeout)
    elif state in ['absent', 'stopped']:
        # first wait for the stop condition
        end = start + datetime.timedelta(seconds=timeout)

        while datetime.datetime.utcnow() < end:
            if path:
                try:
                    if not os.access(path, os.F_OK):
                        break
                except IOError:
                    break
            elif port:
                try:
                    s = socket.create_connection((host, port), connect_timeout)
                    s.shutdown(socket.SHUT_RDWR)
                    s.close()
                except Exception:
                    break
            # Conditions not yet met, wait and try again
            time.sleep(module.params['sleep'])
        else:
            # while/else: the loop ran out of time without breaking.
            elapsed = datetime.datetime.utcnow() - start
            if port:
                module.fail_json(msg=msg or "Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
            elif path:
                module.fail_json(msg=msg or "Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)

    elif state in ['started', 'present']:
        # wait for start condition
        end = start + datetime.timedelta(seconds=timeout)
        while datetime.datetime.utcnow() < end:
            if path:
                try:
                    os.stat(path)
                except OSError as e:
                    # If anything except file not present, throw an error
                    if e.errno != 2:
                        elapsed = datetime.datetime.utcnow() - start
                        module.fail_json(msg=msg or "Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
                    # file doesn't exist yet, so continue
                else:
                    # File exists. Are there additional things to check?
                    if not compiled_search_re:
                        # nope, succeed!
                        break
                    try:
                        f = open(path)
                        try:
                            search = re.search(compiled_search_re, f.read())
                            if search:
                                if search.groupdict():
                                    match_groupdict = search.groupdict()
                                if search.groups():
                                    match_groups = search.groups()
                                break
                        finally:
                            f.close()
                    except IOError:
                        pass
            elif port:
                # Cap the per-attempt connect timeout by the remaining
                # overall time budget.
                alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
                try:
                    s = socket.create_connection((host, port), min(connect_timeout, alt_connect_timeout))
                except Exception:
                    # Failed to connect by connect_timeout. wait and try again
                    pass
                else:
                    # Connected -- are there additional conditions?
                    if compiled_search_re:
                        data = ''
                        matched = False
                        while datetime.datetime.utcnow() < end:
                            max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.utcnow()))
                            (readable, w, e) = select.select([s], [], [], max_timeout)
                            if not readable:
                                # No new data. Probably means our timeout
                                # expired
                                continue
                            response = s.recv(1024)
                            if not response:
                                # Server shutdown
                                break
                            data += to_native(response, errors='surrogate_or_strict')
                            if re.search(compiled_search_re, data):
                                matched = True
                                break

                        # Shutdown the client socket
                        try:
                            s.shutdown(socket.SHUT_RDWR)
                        except socket.error as e:
                            if e.errno != errno.ENOTCONN:
                                raise
                            # else, the server broke the connection on its end, assume it's not ready
                        else:
                            s.close()
                        if matched:
                            # Found our string, success!
                            break
                    else:
                        # Connection established, success!
                        try:
                            s.shutdown(socket.SHUT_RDWR)
                        except socket.error as e:
                            if e.errno != errno.ENOTCONN:
                                raise
                            # else, the server broke the connection on its end, assume it's not ready
                        else:
                            s.close()
                        break

            # Conditions not yet met, wait and try again
            time.sleep(module.params['sleep'])

        else:  # while-else
            # Timeout expired
            elapsed = datetime.datetime.utcnow() - start
            if port:
                if search_regex:
                    module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
                else:
                    module.fail_json(msg=msg or "Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
            elif path:
                if search_regex:
                    module.fail_json(msg=msg or "Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
                else:
                    module.fail_json(msg=msg or "Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)

    elif state == 'drained':
        # wait until all active connections are gone
        end = start + datetime.timedelta(seconds=timeout)
        tcpconns = TCPConnectionInfo(module)
        while datetime.datetime.utcnow() < end:
            if tcpconns.get_active_connections_count() == 0:
                break
            # Conditions not yet met, wait and try again
            time.sleep(module.params['sleep'])
        else:
            elapsed = datetime.datetime.utcnow() - start
            module.fail_json(msg=msg or "Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(state=state, port=port, search_regex=search_regex, match_groups=match_groups, match_groupdict=match_groupdict, path=path,
                     elapsed=elapsed.seconds)
# Standard boilerplate: invoke the module entry point when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ryfeus/lambda-packs | pytorch/source/torch/distributions/one_hot_categorical.py | 1 | 3680 | import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution
class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        # Delegate parameter normalization/validation to Categorical; the
        # event shape is the size of the last parameter dimension.
        self._categorical = Categorical(probs, logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(OneHotCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        # Broadcast the underlying Categorical to the new batch shape.
        new = self._get_checked_instance(OneHotCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new._categorical = self._categorical.expand(batch_shape)
        super(OneHotCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def _param(self):
        return self._categorical._param

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        # Per-slot Bernoulli variance p * (1 - p).
        return self._categorical.probs * (1 - self._categorical.probs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        # Draw categorical indices, then scatter them into one-hot vectors.
        sample_shape = torch.Size(sample_shape)
        probs = self._categorical.probs
        indices = self._categorical.sample(sample_shape)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .scatter_()
            eye = torch.eye(self.event_shape[-1], dtype=self._param.dtype, device=self._param.device)
            return eye[indices]
        one_hot = probs.new_zeros(self._extended_shape(sample_shape))
        if indices.dim() < one_hot.dim():
            indices = indices.unsqueeze(-1)
        return one_hot.scatter_(-1, indices, 1.)

    def log_prob(self, value):
        # The log-probability of a one-hot vector is that of its hot index.
        if self._validate_args:
            self._validate_sample(value)
        indices = value.max(-1)[1]
        return self._categorical.log_prob(indices)

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self, expand=True):
        # All n one-hot vectors, shaped (n,) + batch_shape + (n,).
        n = self.event_shape[0]
        values = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
        values = values.view((n,) + (1,) * len(self.batch_shape) + (n,))
        if expand:
            values = values.expand((n,) + self.batch_shape + (n,))
        return values
| mit |
tkarna/cofs | test/swe2d/generate_basin_mms_setup.py | 2 | 4988 | """
Generates setups for 2d shallow water MMS tests
"""
import sympy
from sympy import init_printing
init_printing()
# Symbolic quantities shared by all setup generators below.
# coordinates
x, y, z = sympy.symbols('x[0] x[1] x[2]')
z_tilde = sympy.symbols('z_tilde')
# domain length, x in [0, Lx], y in [0, Ly]
lx, ly = sympy.symbols('lx ly')
# depth scale
h0 = sympy.symbols('h0', positive=True)
# coriolis scale
f0 = sympy.symbols('f0', positive=True)
# viscosity scale
nu0 = sympy.symbols('nu0', positive=True)
# gravitational acceleration
g = sympy.symbols('g')
# time
t = sympy.symbols('t', positive=True)
T = sympy.symbols('T', positive=True)
def get_ufl_expr(w):
    """
    Generates a string that can be interpreted as a UFL expression.

    :arg w: sympy expression to stringify
    """
    return str(w)
def get_scalar_entry(name, u=None, v=None, w=None):
    """Generates an entry for a scalar expression"""
    template = """    out['{name}'] = {u}\n"""

    def to_expr(value):
        # None stands for a zero component.
        return '0.0' if value is None else get_ufl_expr(value)

    return template.format(name=name, u=to_expr(u))
def get_vector_entry(name, u=None, v=None):
    """Generates an entry for a 2d vector expression"""
    template = """    out['{name}'] = as_vector(
        [
            {u},
            {v},
        ])\n"""

    def to_expr(value):
        # None stands for a zero component.
        return '0.0' if value is None else get_ufl_expr(value)

    return template.format(name=name, u=to_expr(u), v=to_expr(v))
def get_header(name, description):
    """Generates the opening lines of a setup function definition."""
    template = '''def {name}(x, lx, ly, h0, f0, nu0, g):
    """
    {txt}
    """
    out = {{}}\n'''
    return template.format(name=name, txt=description)
def get_footer():
    """Generates the closing lines of a setup function definition,
    with placeholder boundary conditions and the return statement."""
    t = """
    # NOTE boundary condititions must be set manually to something meaningful
    out['bnd_funcs'] = {1: {'uv': None},
                        2: {'uv': None},
                        3: {'uv': None},
                        4: {'uv': None},
                        }
    return out"""
    return t
def evaluate_source_term(eta, u, v, h, f, nu, nonlin=True):
    """Symbolically evaluate the 2D shallow-water residuals.

    :arg eta: free surface elevation expression
    :arg u, v: horizontal velocity component expressions
    :arg h: bathymetry expression
    :arg f: Coriolis parameter expression
    :arg nu: horizontal viscosity expression
    :kwarg nonlin: include nonlinear advection and total depth (eta + h)
    :returns: tuple (res_elev, res_u, res_v) of residual expressions
    """
    # evaluate equations
    if nonlin:
        depth = eta + h
    else:
        depth = h
    # continuity: d(eta)/dt + div(depth * uv)
    div_hu = sympy.diff(depth*u, x) + sympy.diff(depth*v, y)
    res_elev = sympy.diff(eta, t) + div_hu
    u_x = sympy.diff(u, x)
    u_y = sympy.diff(u, y)
    v_x = sympy.diff(v, x)
    v_y = sympy.diff(v, y)
    if nonlin:
        adv_u = u*u_x + v*u_y
        adv_v = u*v_x + v*v_y
    else:
        adv_u = adv_v = 0
    # Coriolis and pressure gradient terms
    cori_u = -f*v
    cori_v = f*u
    pg_u = g*sympy.diff(eta, x)
    pg_v = g*sympy.diff(eta, y)
    # viscous terms (note the doubled normal-derivative components)
    visc_u = -(2*sympy.diff(nu*sympy.diff(u, x), x)
               + sympy.diff(nu*sympy.diff(u, y), y)
               + sympy.diff(nu*sympy.diff(v, x), y))
    visc_v = -(2*sympy.diff(nu*sympy.diff(v, y), y)
               + sympy.diff(nu*sympy.diff(v, x), x)
               + sympy.diff(nu*sympy.diff(u, y), x))
    # depth-gradient corrections to the viscous terms
    visc_u += -sympy.diff(depth, x)/depth * nu * 2 * sympy.diff(u, x)
    visc_v += -sympy.diff(depth, y)/depth * nu * 2 * sympy.diff(v, y)
    res_u = sympy.diff(u, t) + adv_u + cori_u + pg_u + visc_u
    res_v = sympy.diff(v, t) + adv_v + cori_v + pg_v + visc_v
    return res_elev, res_u, res_v
def generate_setup(name, description, h, f, eta, u, v, nu):
    """
    Generates setup function that can be copied to mms test.
    """
    res_elev, res_u, res_v = evaluate_source_term(eta, u, v, h, f, nu)
    # Assemble the generated function piece by piece, then print it.
    parts = [get_header(name, description),
             get_scalar_entry('bath_expr', h)]
    if f != 0.0:
        parts.append(get_scalar_entry('cori_expr', f))
    if nu != 0.0:
        parts.append(get_scalar_entry('visc_expr', nu))
    parts.append(get_scalar_entry('elev_expr', eta))
    parts.append(get_vector_entry('uv_expr', u=u, v=v))
    parts.append(get_scalar_entry('res_elev_expr', res_elev))
    parts.append(get_vector_entry('res_uv_expr', u=res_u, v=res_v))
    parts.append(get_footer())
    print('')
    print('')
    print(''.join(parts))
# --- setup7: Coriolis + bathymetry, inviscid, zero tangential velocity ---
name = 'setup7'
description = """Non-trivial Coriolis, bath, elev, u and v, tangential velocity is zero at bnd to test flux BCs"""
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = f0*sympy.cos(sympy.pi*(x + y)/lx)
nu = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)*sympy.sin(sympy.pi*y/ly)
v = 0.5*sympy.sin(sympy.pi*x/lx)*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
generate_setup(name, description, h, f, eta, u, v, nu)

# --- setup8: as setup7 but with non-zero tangential boundary velocity ---
name = 'setup8'
description = """Non-trivial Coriolis, bath, elev, u and v, tangential velocity is non-zero at bnd, must prescribe uv at boundary."""
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = f0*sympy.cos(sympy.pi*(x + y)/lx)
nu = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)
v = 0.5*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
generate_setup(name, description, h, f, eta, u, v, nu)

# --- setup9: viscous case without Coriolis ---
name = 'setup9'
description = 'No Coriolis, non-trivial bath, viscosity, elev, u and v.'
h = 4.0 + h0*sympy.sqrt(0.3*x*x + 0.2*y*y + 0.1)/lx
f = 0
eta = sympy.cos(sympy.pi*(3.0*x + 1.0*y)/lx)
u = sympy.sin(sympy.pi*(-2.0*x + 1.0*y)/lx)
v = 0.5*sympy.sin(sympy.pi*(-3.0*x + 1.0*y)/lx)
nu = nu0*(1.0 + x/lx)
generate_setup(name, description, h, f, eta, u, v, nu)
hmcmooc/muddx-platform | common/lib/capa/capa/tests/response_xml_factory.py | 47 | 34688 | from lxml import etree
from abc import ABCMeta, abstractmethod
class ResponseXMLFactory(object):
    """ Abstract base class for capa response XML factories.
    Subclasses override create_response_element and
    create_input_element to produce XML of particular response types"""

    __metaclass__ = ABCMeta

    @abstractmethod
    def create_response_element(self, **kwargs):
        """ Subclasses override to return an etree element
        representing the capa response XML
        (e.g. <numericalresponse>).
        The tree should NOT contain any input elements
        (such as <textline />) as these will be added later."""
        return None

    @abstractmethod
    def create_input_element(self, **kwargs):
        """ Subclasses override this to return an etree element
        representing the capa input XML (such as <textline />)"""
        return None

    def build_xml(self, **kwargs):
        """ Construct an XML string for a capa response
        based on **kwargs.

        **kwargs is a dictionary that will be passed
        to create_response_element() and create_input_element().
        See the subclasses below for other keyword arguments
        you can specify.

        For all response types, **kwargs can contain:

        *question_text*: The text of the question to display,
            wrapped in <p> tags.

        *explanation_text*: The detailed explanation that will
            be shown if the user answers incorrectly.

        *script*: The embedded Python script (a string)

        *num_responses*: The number of responses to create [DEFAULT: 1]

        *num_inputs*: The number of input elements
            to create [DEFAULT: 1]

        Returns a string representation of the XML tree.
        """
        # Retrieve keyword arguments
        question_text = kwargs.get('question_text', '')
        explanation_text = kwargs.get('explanation_text', '')
        script = kwargs.get('script', None)
        num_responses = kwargs.get('num_responses', 1)
        num_inputs = kwargs.get('num_inputs', 1)

        # The root is <problem>
        root = etree.Element("problem")

        # Add a script if there is one
        if script:
            script_element = etree.SubElement(root, "script")
            script_element.set("type", "loncapa/python")
            script_element.text = str(script)

        # The problem has a child <p> with question text
        question = etree.SubElement(root, "p")
        question.text = question_text

        # Add the response(s), each with its input element(s)
        for _ in range(int(num_responses)):
            response_element = self.create_response_element(**kwargs)
            root.append(response_element)

            # Add input elements
            for _ in range(int(num_inputs)):
                input_element = self.create_input_element(**kwargs)
                # Explicit identity check (the original used the
                # non-idiomatic `not (None == input_element)`).
                if input_element is not None:
                    response_element.append(input_element)

        # The problem has an explanation of the solution
        if explanation_text:
            explanation = etree.SubElement(root, "solution")
            explanation_div = etree.SubElement(explanation, "div")
            explanation_div.set("class", "detailed-solution")
            explanation_div.text = explanation_text

        return etree.tostring(root)

    @staticmethod
    def textline_input_xml(**kwargs):
        """ Create a <textline/> XML element

        Uses **kwargs:

        *math_display*: If True, then includes a MathJax display of user input

        *size*: An integer representing the width of the text line
        """
        math_display = kwargs.get('math_display', False)
        size = kwargs.get('size', None)

        input_element = etree.Element('textline')

        if math_display:
            input_element.set('math', '1')

        if size:
            input_element.set('size', str(size))

        return input_element

    @staticmethod
    def choicegroup_input_xml(**kwargs):
        """ Create a <choicegroup> XML element

        Uses **kwargs:

        *choice_type*: Can be "checkbox", "radio", or "multiple"

        *choices*: List of True/False values indicating whether
            a particular choice is correct or not.
            Users must choose *all* correct options in order
            to be marked correct.
            DEFAULT: [True]

        *choice_names*: List of strings identifying the choices.
            If specified, you must ensure that
            len(choice_names) == len(choices)
        """
        # Names of group elements
        group_element_names = {'checkbox': 'checkboxgroup',
                               'radio': 'radiogroup',
                               'multiple': 'choicegroup'}

        # Retrieve **kwargs
        choices = kwargs.get('choices', [True])
        choice_type = kwargs.get('choice_type', 'multiple')
        choice_names = kwargs.get('choice_names', [None] * len(choices))

        # Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
        assert choice_type in group_element_names
        group_element = etree.Element(group_element_names[choice_type])

        # Create the <choice> elements
        for (correct_val, name) in zip(choices, choice_names):
            choice_element = etree.SubElement(group_element, "choice")
            choice_element.set("correct", "true" if correct_val else "false")

            # Add a name identifying the choice, if one exists
            # For simplicity, we use the same string as both the
            # name attribute and the text of the element
            if name:
                choice_element.text = str(name)
                choice_element.set("name", str(name))

        return group_element
class NumericalResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <numericalresponse> XML trees """

    def create_response_element(self, **kwargs):
        """ Create a <numericalresponse> XML element.

        Uses **kwarg keys:
        *answer*: The correct answer (e.g. "5")
        *tolerance*: The tolerance within which a response
            is considered correct. Can be a decimal (e.g. "0.01")
            or percentage (e.g. "2%")
        """
        answer = kwargs.get('answer', None)
        tolerance = kwargs.get('tolerance', None)

        response_element = etree.Element('numericalresponse')

        # Compare against None explicitly so falsy-but-valid values
        # (answer 0, tolerance 0) are not silently dropped.
        if answer is not None:
            if isinstance(answer, float):
                # repr() preserves full float precision; str() may round.
                response_element.set('answer', repr(answer))
            else:
                response_element.set('answer', str(answer))

        if tolerance is not None:
            responseparam_element = etree.SubElement(response_element, 'responseparam')
            responseparam_element.set('type', 'tolerance')
            responseparam_element.set('default', str(tolerance))

        return response_element

    def create_input_element(self, **kwargs):
        """ Numerical responses are entered in a plain text line. """
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class CustomResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <customresponse> XML trees """

    def create_response_element(self, **kwargs):
        """Create a <customresponse> XML element.

        Keyword Args:
            cfn: Python code to run; inline code or the name of a function
                defined in earlier <script> tags. Expected form:
                cfn(expect, answer_given, student_answers), where *expect*
                is a value, *answer_given* is a single value (1 input) or
                a list of values (multiple inputs), and *student_answers*
                is a dict of answers keyed by input ID.
            expect: the value passed to the cfn function.
            answer: inline script that calculates the answer.
            options: extra options string forwarded as an attribute.
            cfn_extra_args: extra args string forwarded as an attribute.
        """
        response = etree.Element("customresponse")

        # cfn and expect become attributes when provided.
        for attr in ('cfn', 'expect'):
            value = kwargs.get(attr, None)
            if value:
                response.set(attr, str(value))

        # An inline answer script becomes an <answer> child element.
        inline_answer = kwargs.get('answer', None)
        if inline_answer:
            answer_node = etree.SubElement(response, "answer")
            answer_node.text = str(inline_answer)

        for attr in ('options', 'cfn_extra_args'):
            value = kwargs.get(attr, None)
            if value:
                response.set(attr, str(value))

        return response

    def create_input_element(self, **kwargs):
        """Custom responses are entered in a plain text line."""
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class SymbolicResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <symbolicresponse> XML trees """
    # NOTE(review): a second class with this exact name is defined later
    # in this module and shadows this one at import time — confirm which
    # definition is intended to survive.

    def create_response_element(self, **kwargs):
        """Create a <symbolicresponse> element with optional
        cfn/answer/options attributes (each set only when truthy)."""
        response = etree.Element("symbolicresponse")
        for attr in ('cfn', 'answer', 'options'):
            value = kwargs.get(attr, None)
            if value:
                response.set(attr, str(value))
        return response

    def create_input_element(self, **kwargs):
        """Symbolic responses are entered in a plain text line."""
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class SchematicResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <schematicresponse> XML trees """

    def create_response_element(self, **kwargs):
        """Create the <schematicresponse> XML element.

        Keyword Args:
            answer: Python script used to evaluate the answer; embedded
                as an <answer type="loncapa/python"> child when given.
        """
        response = etree.Element("schematicresponse")
        script = kwargs.get('answer', None)
        if script:
            answer_node = etree.SubElement(response, "answer")
            answer_node.set("type", "loncapa/python")
            answer_node.text = str(script)
        return response

    def create_input_element(self, **kwargs):
        """Return a bare-bones <schematic> element.

        The tag supports several attributes (height, width, parts,
        analyses, submit_analysis, initial_value) but none are used by
        the capa module, so the tests omit them.
        """
        return etree.Element("schematic")
class CodeResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <coderesponse> XML trees """

    def build_xml(self, **kwargs):
        """Build the full problem XML, suppressing the <solution> tag."""
        # Since we are providing an <answer> tag,
        # we should override the default behavior
        # of including a <solution> tag as well
        kwargs['explanation_text'] = None
        return super(CodeResponseXMLFactory, self).build_xml(**kwargs)

    def create_response_element(self, **kwargs):
        """
        Create a <coderesponse> XML element.

        Uses **kwargs:

        *initial_display*: The code that initially appears in the textbox
                            [DEFAULT: "Enter code here"]
        *answer_display*: The answer to display to the student
                            [DEFAULT: "This is the correct answer!"]
        *grader_payload*: A JSON-encoded string sent to the grader
                            [DEFAULT: empty dict string]
        *allowed_files*: A space-separated string of file names.
                            [DEFAULT: None]
        *required_files*: A space-separated string of file names.
                            [DEFAULT: None]
        """
        # Get **kwargs
        initial_display = kwargs.get("initial_display", "Enter code here")
        answer_display = kwargs.get("answer_display", "This is the correct answer!")
        grader_payload = kwargs.get("grader_payload", '{}')
        allowed_files = kwargs.get("allowed_files", None)
        required_files = kwargs.get("required_files", None)

        # Create the <coderesponse> element
        response_element = etree.Element("coderesponse")

        # If files are involved, create the <filesubmission> element.
        has_files = allowed_files or required_files
        if has_files:
            filesubmission_element = etree.SubElement(response_element, "filesubmission")
            if allowed_files:
                filesubmission_element.set("allowed_files", allowed_files)
            if required_files:
                filesubmission_element.set("required_files", required_files)

        # Create the <codeparam> element.
        # NOTE: child order (initial_display, answer_display,
        # grader_payload) is preserved deliberately.
        codeparam_element = etree.SubElement(response_element, "codeparam")

        # Set the initial display text
        initial_element = etree.SubElement(codeparam_element, "initial_display")
        initial_element.text = str(initial_display)

        # Set the answer display text
        answer_element = etree.SubElement(codeparam_element, "answer_display")
        answer_element.text = str(answer_display)

        # Set the grader payload string
        grader_element = etree.SubElement(codeparam_element, "grader_payload")
        grader_element.text = str(grader_payload)

        # Create the input within the response.  File-based problems use
        # the <filesubmission> input created above instead of a textbox.
        if not has_files:
            input_element = etree.SubElement(response_element, "textbox")
            input_element.set("mode", "python")

        return response_element

    def create_input_element(self, **kwargs):
        # Since we create this in create_response_element(),
        # return None here
        return None
class ChoiceResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <choiceresponse> XML trees """

    def create_response_element(self, **kwargs):
        """Return a bare <choiceresponse> element."""
        return etree.Element("choiceresponse")

    def create_input_element(self, **kwargs):
        """Delegate to the shared choice-group builder (checkbox group)."""
        return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class FormulaResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <formularesponse> XML trees """

    def create_response_element(self, **kwargs):
        """ Create a <formularesponse> element.

        *sample_dict*: A dictionary of the form:
                        { VARIABLE_NAME: (MIN, MAX), ....}
                        This specifies the range within which
                        to numerically sample each variable to check
                        student answers.
                        [REQUIRED]

        *num_samples*: The number of times to sample the student's answer
                        to numerically compare it to the correct answer.

        *tolerance*: The tolerance within which answers will be accepted
                        [DEFAULT: 0.01]

        *answer*: The answer to the problem. Can be a formula string
                    or a Python variable defined in a script
                    (e.g. "$calculated_answer" for a Python variable
                    called calculated_answer)
                    [REQUIRED]

        *hints*: List of (hint_prompt, hint_name, hint_text) tuples
                Where *hint_prompt* is the formula for which we show the hint,
                *hint_name* is an internal identifier for the hint,
                and *hint_text* is the text we show for the hint.
        """
        # Retrieve kwargs
        sample_dict = kwargs.get("sample_dict", None)
        num_samples = kwargs.get("num_samples", None)
        tolerance = kwargs.get("tolerance", 0.01)
        answer = kwargs.get("answer", None)
        hint_list = kwargs.get("hints", None)

        assert(answer)
        assert(sample_dict and num_samples)

        # Create the <formularesponse> element
        response_element = etree.Element("formularesponse")

        # Set the sample information
        sample_str = self._sample_str(sample_dict, num_samples)
        response_element.set("samples", sample_str)

        # Set the tolerance
        responseparam_element = etree.SubElement(response_element, "responseparam")
        responseparam_element.set("type", "tolerance")
        responseparam_element.set("default", str(tolerance))

        # Set the answer
        response_element.set("answer", str(answer))

        # Include hints, if specified
        if hint_list:
            hintgroup_element = etree.SubElement(response_element, "hintgroup")

            for (hint_prompt, hint_name, hint_text) in hint_list:
                # For each hint, create a <formulahint> element
                formulahint_element = etree.SubElement(hintgroup_element, "formulahint")

                # We could sample a different range, but for simplicity,
                # we use the same sample string for the hints
                # that we used previously.
                formulahint_element.set("samples", sample_str)

                formulahint_element.set("answer", str(hint_prompt))
                formulahint_element.set("name", str(hint_name))

                # For each hint, create a <hintpart> element
                # corresponding to the <formulahint>
                hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
                hintpart_element.set("on", str(hint_name))
                text_element = etree.SubElement(hintpart_element, "text")
                text_element.text = str(hint_text)

        return response_element

    def create_input_element(self, **kwargs):
        """Formula responses are entered in a plain text line."""
        return ResponseXMLFactory.textline_input_xml(**kwargs)

    def _sample_str(self, sample_dict, num_samples):
        """Build Loncapa's sample string for *sample_dict*.

        Format: "x,y,z@4,5,3:10,12,8#4" means plug in values for (x,y,z)
        from within the box defined by points (4,5,3) and (10,12,8);
        "#4" means to repeat 4 times.

        Fixed: dropped an unused local (`variables`) and the unused
        `tolerance` parameter of this private helper.
        """
        low_range_vals = [str(f[0]) for f in sample_dict.values()]
        high_range_vals = [str(f[1]) for f in sample_dict.values()]
        return (",".join(sample_dict.keys()) + "@" +
                ",".join(low_range_vals) + ":" +
                ",".join(high_range_vals) +
                "#" + str(num_samples))
class ImageResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <imageresponse> XML """

    def create_response_element(self, **kwargs):
        """Return a bare <imageresponse> element."""
        return etree.Element("imageresponse")

    def create_input_element(self, **kwargs):
        """Create the <imageinput> element.

        Keyword Args:
            src: URL for the image file [DEFAULT: "/static/image.jpg"].
            width: width of the image [DEFAULT: 100].
            height: height of the image [DEFAULT: 100].
            rectangle: string describing the rectangle(s) the user should
                select, of the form "(x1,y1)-(x2,y2)"; multiple rectangles
                may be joined with semicolons, e.g.
                "(490,11)-(556,98);(242,202)-(296,276)".
            regions: string describing selectable regions, of the form
                "[ [[x1,y1], [x2,y2], [x3,y3]],
                   [[x1,y1], [x2,y2], [x3,y3]] ]"
                (two regions, each with 3 points).

        REQUIRED: at least one of *rectangle* or *regions*.
        """
        rectangle = kwargs.get('rectangle', None)
        regions = kwargs.get('regions', None)
        assert(rectangle or regions)

        imageinput = etree.Element("imageinput")
        # src/width/height always present, with documented defaults.
        for attr, fallback in (("src", "/static/image.jpg"),
                               ("width", 100),
                               ("height", 100)):
            imageinput.set(attr, str(kwargs.get(attr, fallback)))

        if rectangle:
            imageinput.set("rectangle", rectangle)
        if regions:
            imageinput.set("regions", regions)

        return imageinput
class JavascriptResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <javascriptresponse> XML """

    def create_response_element(self, **kwargs):
        """Create the <javascriptresponse> element.

        Keyword Args:
            generator_src: name of the JS file that generates the problem.
            grader_src: name of the JS file that grades the problem.
            display_class: name of the class used to display the problem.
            display_src: name of the JS file used to display the problem.
            param_dict: dict of parameters to pass to the JS.

        *display_src* and *display_class* must be given together or
        not at all.
        """
        generator_src = kwargs.get("generator_src", None)
        grader_src = kwargs.get("grader_src", None)
        display_class = kwargs.get("display_class", None)
        display_src = kwargs.get("display_src", None)
        param_dict = kwargs.get("param_dict", {})

        # Either both display fields are present, or neither is.
        assert(bool(display_src) == bool(display_class))

        response = etree.Element("javascriptresponse")

        if generator_src:
            etree.SubElement(response, "generator").set("src", str(generator_src))

        if grader_src:
            etree.SubElement(response, "grader").set("src", str(grader_src))

        if display_class and display_src:
            display = etree.SubElement(response, "display")
            display.set("class", str(display_class))
            display.set("src", str(display_src))

        for param_name, param_val in param_dict.items():
            param = etree.SubElement(response, "responseparam")
            param.set("name", str(param_name))
            param.set("value", str(param_val))

        return response

    def create_input_element(self, **kwargs):
        """ Create the <javascriptinput> element """
        return etree.Element("javascriptinput")
class MultipleChoiceResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <multiplechoiceresponse> XML """

    def create_response_element(self, **kwargs):
        """Return a bare <multiplechoiceresponse> element."""
        return etree.Element('multiplechoiceresponse')

    def create_input_element(self, **kwargs):
        """Build the <choicegroup> input, forcing the 'multiple' type."""
        kwargs['choice_type'] = 'multiple'
        return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class TrueFalseResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <truefalseresponse> XML """

    def create_response_element(self, **kwargs):
        """Return a bare <truefalseresponse> element."""
        return etree.Element('truefalseresponse')

    def create_input_element(self, **kwargs):
        """Build the <choicegroup> input, forcing the 'multiple' type."""
        kwargs['choice_type'] = 'multiple'
        return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class OptionResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <optionresponse> XML"""

    def create_response_element(self, **kwargs):
        """ Create the <optionresponse> element"""
        return etree.Element("optionresponse")

    def create_input_element(self, **kwargs):
        """ Create the <optioninput> element.

        Uses **kwargs:

        *options*: a list of possible options the user can choose from [REQUIRED]
                    You must specify at least 2 options.
        *correct_option*: the correct choice from the list of options [REQUIRED]
        """
        options_list = kwargs.get('options', None)
        correct_option = kwargs.get('correct_option', None)

        assert(options_list and correct_option)
        assert(len(options_list) > 1)
        assert(correct_option in options_list)

        # Create the <optioninput> element
        optioninput_element = etree.Element("optioninput")

        # Set the "options" attribute
        # Format: "('first','second','third')" — note there is NO space
        # after the commas in the generated string.
        options_attr_string = u",".join([u"'{}'".format(o) for o in options_list])
        options_attr_string = u"({})".format(options_attr_string)
        optioninput_element.set('options', options_attr_string)

        # Set the "correct" attribute
        optioninput_element.set('correct', str(correct_option))

        return optioninput_element
class StringResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <stringresponse> XML """

    def create_response_element(self, **kwargs):
        """ Create a <stringresponse> XML element.

        Uses **kwargs:

        *answer*: The correct answer (a string) [REQUIRED]

        *case_sensitive*: Whether the response is case-sensitive (True/False)
                        [DEFAULT: True]

        *hints*: List of (hint_prompt, hint_name, hint_text) tuples
            Where *hint_prompt* is the string for which we show the hint,
            *hint_name* is an internal identifier for the hint,
            and *hint_text* is the text we show for the hint.

        *hintfn*: The name of a function in the script to use for hints.

        *regexp*: Whether the response is regexp

        *additional_answers*: list of additional answers.
        """
        # Retrieve the **kwargs
        answer = kwargs.get("answer", None)
        case_sensitive = kwargs.get("case_sensitive", None)
        hint_list = kwargs.get('hints', None)
        hint_fn = kwargs.get('hintfn', None)
        regexp = kwargs.get('regexp', None)
        additional_answers = kwargs.get('additional_answers', [])
        assert answer

        # Create the <stringresponse> element
        response_element = etree.Element("stringresponse")

        # Set the answer attribute
        # NOTE(review): `unicode` is Python-2-only; this module appears to
        # target Python 2 — confirm before porting to Python 3 (use str).
        response_element.set("answer", unicode(answer))

        # Set the case sensitivity and regexp:
        # "cs"/"ci" selects case handling; " regexp" is appended when the
        # answer should be treated as a regular expression.
        type_value = ''
        if case_sensitive is not None:
            type_value += "cs" if case_sensitive else "ci"
        type_value += ' regexp' if regexp else ''
        if type_value:
            response_element.set("type", type_value.strip())

        # Add the hints if specified.  hint_list and hint_fn are mutually
        # exclusive (asserted below).
        if hint_list or hint_fn:
            hintgroup_element = etree.SubElement(response_element, "hintgroup")
            if hint_list:
                assert not hint_fn
                for (hint_prompt, hint_name, hint_text) in hint_list:
                    stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
                    stringhint_element.set("answer", str(hint_prompt))
                    stringhint_element.set("name", str(hint_name))
                    hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
                    hintpart_element.set("on", str(hint_name))
                    hint_text_element = etree.SubElement(hintpart_element, "text")
                    hint_text_element.text = str(hint_text)
            if hint_fn:
                assert not hint_list
                hintgroup_element.set("hintfn", hint_fn)

        # Each additional answer becomes its own <additional_answer> child.
        for additional_answer in additional_answers:
            etree.SubElement(response_element, "additional_answer").text = additional_answer

        return response_element

    def create_input_element(self, **kwargs):
        """String responses are entered in a plain text line."""
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class AnnotationResponseXMLFactory(ResponseXMLFactory):
    """ Factory for creating <annotationresponse> XML trees """

    def create_response_element(self, **kwargs):
        """Return a bare <annotationresponse> element."""
        return etree.Element("annotationresponse")

    def create_input_element(self, **kwargs):
        """Create an <annotationinput> element with title/text/comment
        children plus an <options> list of (description, correctness)
        choices."""
        annotation_input = etree.Element("annotationinput")

        # Text children, in order, with their fallback contents.
        text_defaults = (
            ('title', 'super cool annotation'),
            ('text', 'texty text'),
            ('comment', 'blah blah erudite comment blah blah'),
            ('comment_prompt', 'type a commentary below'),
            ('tag_prompt', 'select one tag'),
        )
        for tag, fallback in text_defaults:
            etree.SubElement(annotation_input, tag).text = kwargs.get(tag, fallback)

        default_options = [('green', 'correct'), ('eggs', 'incorrect'),
                           ('ham', 'partially-correct')]
        options_node = etree.SubElement(annotation_input, 'options')
        for description, correctness in kwargs.get('options', default_options):
            option = etree.SubElement(options_node, 'option', {'choice': correctness})
            option.text = description

        return annotation_input
class SymbolicResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <symbolicresponse> xml """
    # NOTE(review): this shadows the SymbolicResponseXMLFactory defined
    # earlier in this module — confirm the duplication is intentional.

    def create_response_element(self, **kwargs):
        """Build the <symbolicresponse> XML element.

        Keyword Args:
            expect: the correct answer (a sympy string).
            options: list of option strings to pass to symmath_check
                (e.g. 'matrix', 'qbit', 'imaginary', 'numerical').
        """
        expected_answer = kwargs.get('expect', '')
        option_list = kwargs.get('options', [])

        # symmath_check expects a single comma-separated options string.
        joined_options = ",".join(option_list)

        response = etree.Element('symbolicresponse')
        if expected_answer:
            response.set('expect', str(expected_answer))
        if joined_options:
            response.set('options', str(joined_options))
        return response

    def create_input_element(self, **kwargs):
        """Symbolic responses are entered in a plain text line."""
        return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
    """ Factory for producing <choicetextresponse> xml """

    def create_response_element(self, **kwargs):
        """ Create a <choicetextresponse> element """
        return etree.Element("choicetextresponse")

    def create_input_element(self, **kwargs):
        """ Create a <checkboxgroup> element.

        choices can be specified in the following format:
        [("true", [{"answer": "5", "tolerance": 0}]),
         ("false", [{"answer": "5", "tolerance": 0}])
        ]

        This indicates that the first checkbox/radio is correct and it
        contains a numtolerance_input with an answer of 5 and a tolerance
        of 0.  It also indicates that the second has a second incorrect
        radiobutton or checkbox with a numtolerance_input.
        """
        choices = kwargs.get('choices', [("true", {})])
        choice_inputs = []

        # Ensure that `choices` is an ordered collection of
        # (correctness, answers) pairs; a single bare pair is wrapped.
        # (isinstance replaces the old `type(...) not in [list, tuple]`.)
        if not isinstance(choices[0], (list, tuple)):
            choices = [choices]

        for choice in choices:
            correctness, answers = choice
            numtolerance_inputs = []
            # If the current `choice` contains any ("answer": number)
            # elements, turn those into numtolerance_inputs.
            if answers:
                # `answers` may be a single answer or a list/tuple of
                # answers; normalize to an ordered collection.
                if not isinstance(answers, (list, tuple)):
                    answers = [answers]

                numtolerance_inputs = [
                    self._create_numtolerance_input_element(answer)
                    for answer in answers
                ]

            choice_inputs.append(
                self._create_choice_element(
                    correctness=correctness,
                    inputs=numtolerance_inputs
                )
            )

        # Default type is 'radiotextgroup'
        input_type = kwargs.get('type', 'radiotextgroup')
        input_element = etree.Element(input_type)

        for ind, choice in enumerate(choice_inputs):
            # Give each choice text equal to its position (0, 1, 2, ...)
            choice.text = "choice_{0}".format(ind)
            input_element.append(choice)

        return input_element

    def _create_choice_element(self, **kwargs):
        """
        Creates a choice element for a choicetextproblem.
        Defaults to a correct choice with no numtolerance_input.
        """
        text = kwargs.get('text', '')
        correct = kwargs.get('correctness', "true")
        inputs = kwargs.get('inputs', [])
        choice_element = etree.Element("choice")
        choice_element.set("correct", correct)
        choice_element.text = text
        for inp in inputs:
            # Add all of the inputs as children of this choice
            choice_element.append(inp)
        return choice_element

    def _create_numtolerance_input_element(self, params):
        """
        Creates a <numtolerance_input/> or <decoy_input/> element with
        optionally specified tolerance and answer.
        """
        # dict.get replaces the old `params['k'] if 'k' in params` forms.
        answer = params.get('answer')

        # If there is no answer specified, create a <decoy_input/>;
        # otherwise create a <numtolerance_input/> and set its tolerance
        # and answer attributes.
        if answer:
            text_input = etree.Element("numtolerance_input")
            text_input.set('answer', answer)
            # Default the tolerance to "0" when not specified.
            text_input.set('tolerance', params.get('tolerance', "0"))
        else:
            text_input = etree.Element("decoy_input")
        return text_input
| agpl-3.0 |
datalogics-robb/scons | test/SHLIBSUFFIX.py | 2 | 1597 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

import os
import sys  # NOTE(review): unused in this test — confirm before removing
import TestSCons

# Platform-specific shared-library filename prefix (e.g. "lib" on POSIX).
dll_ = TestSCons.dll_

test = TestSCons.TestSCons()

# Build a shared library whose suffix is overridden via SHLIBSUFFIX.
test.write('SConstruct', """
env = Environment(SHLIBSUFFIX = '.shlib')
env.SharedLibrary(target = 'foo', source = 'foo.c')
""")

test.write('foo.c', r"""
#include <stdio.h>
void
foo(void)
{
printf("foo.c\n");
}
""")

test.run(arguments = '.')

# The built library must exist with the overridden '.shlib' suffix.
test.fail_test(not os.path.exists(test.workpath(dll_ + 'foo.shlib')))

test.pass_test()
| mit |
sankhesh/VTK | Imaging/Core/Testing/Python/TestAllLogic.py | 26 | 3380 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllLogic(vtk.test.Testing.vtkTest):
    """Exercise vtkImageLogic with all six boolean operations, one
    scalar type per operation, and compare the rendered result against
    the stored baseline image."""

    def testAllLogic(self):
        # append multiple displaced spheres into an RGB image.

        # Image pipeline
        renWin = vtk.vtkRenderWindow()

        # One logic operation per viewport, each using a different
        # output scalar type.
        logics = ["And", "Or", "Xor", "Nand", "Nor", "Not"]
        types = ["Float", "Double", "UnsignedInt", "UnsignedLong", "UnsignedShort", "UnsignedChar"]

        sphere1 = list()
        sphere2 = list()
        logic = list()
        mapper = list()
        actor = list()
        imager = list()

        for idx, operator in enumerate(logics):
            ScalarType = types[idx]

            sphere1.append(vtk.vtkImageEllipsoidSource())
            sphere1[idx].SetCenter(95, 100, 0)
            sphere1[idx].SetRadius(70, 70, 70)
            # Dispatch by method name via getattr (replaces eval(), which
            # needlessly compiled and executed a source string).
            getattr(sphere1[idx], 'SetOutputScalarTypeTo' + ScalarType)()
            sphere1[idx].Update()

            sphere2.append(vtk.vtkImageEllipsoidSource())
            sphere2[idx].SetCenter(161, 100, 0)
            sphere2[idx].SetRadius(70, 70, 70)
            getattr(sphere2[idx], 'SetOutputScalarTypeTo' + ScalarType)()
            sphere2[idx].Update()

            logic.append(vtk.vtkImageLogic())
            logic[idx].SetInput1Data(sphere1[idx].GetOutput())
            # "Not" is unary, so it takes no second input.
            if operator != "Not":
                logic[idx].SetInput2Data(sphere2[idx].GetOutput())
            logic[idx].SetOutputTrueValue(150)
            getattr(logic[idx], 'SetOperationTo' + operator)()

            mapper.append(vtk.vtkImageMapper())
            mapper[idx].SetInputConnection(logic[idx].GetOutputPort())
            mapper[idx].SetColorWindow(255)
            mapper[idx].SetColorLevel(127.5)

            actor.append(vtk.vtkActor2D())
            actor[idx].SetMapper(mapper[idx])

            imager.append(vtk.vtkRenderer())
            imager[idx].AddActor2D(actor[idx])

            renWin.AddRenderer(imager[idx])

        # Arrange the six renderers in a 3x2 grid of viewports.
        imager[0].SetViewport(0, .5, .33, 1)
        imager[1].SetViewport(.33, .5, .66, 1)
        imager[2].SetViewport(.66, .5, 1, 1)
        imager[3].SetViewport(0, 0, .33, .5)
        imager[4].SetViewport(.33, 0, .66, .5)
        imager[5].SetViewport(.66, 0, 1, .5)

        renWin.SetSize(768, 512)

        # render and interact with data
        iRen = vtk.vtkRenderWindowInteractor()
        iRen.SetRenderWindow(renWin);
        renWin.Render()

        img_file = "TestAllLogic.png"
        vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
        vtk.test.Testing.interact()
# Run the test through VTK's Testing harness when executed directly.
if __name__ == "__main__":
    vtk.test.Testing.main([(TestAllLogic, 'test')])
| bsd-3-clause |
endlessm/chromium-browser | native_client/pnacl/driver/driver_env.py | 2 | 18165 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Global environment and expression parsing for the PNaCl driver
# This dictionary initializes a shell-like environment.
# Shell escaping and ${} substitution are provided.
# See "class Environment" defined later for the implementation.
from __future__ import print_function
from driver_log import Log, DriverExit
from shelltools import shell
import types
# Default driver environment.  Values are shell-escaped strings that may
# contain ${VAR} substitutions, ${COND ? ...} conditionals, %VAR%
# key-interpolation (e.g. '${BASE_LIB_%BCLIB_ARCH%}'), and ${@function}
# calls resolved by the Environment evaluator below.
INITIAL_ENV = {
  # Set by DriverMain
  'DRIVER_PATH'     : '', # Absolute path to this driver invocation
  'DRIVER_BIN'      : '', # PNaCl driver bin/ directory
  'DRIVER_REV_FILE' : '${BASE}/REV',

  'BASE_NACL'       : '${@FindBaseNaCl}',      # Absolute path of native_client/
  'BASE_TOOLCHAIN'  : '${@FindBaseToolchain}', # Absolute path to toolchain/OS_ARCH/
  'BASE'            : '${@FindBasePNaCl}',     # Absolute path to PNaCl
  'BUILD_OS'        : '${@GetBuildOS}',        # "linux", "nacl", "darwin"
                                               # or "windows"
  'BUILD_ARCH'      : '${@GetBuildArch}',      # "x86_64" or "i686" or "i386"

  # Directories
  'CLANG_VER'       : '3.7.0', # Included in path to compiler-owned libs/headers
  'BPREFIXES'       : '', # Prefixes specified using the -B flag.
  'BASE_LLVM'       : '${@FindBaseHost:clang}',
  'BASE_BINUTILS'   : '${@FindBaseHost:arm-nacl-ar}',

  'BASE_LIB_NATIVE' : '${BASE}/translator/',

  'BASE_USR'        : '${BASE}/le32-nacl',
  'BASE_SDK'        : '${BASE}/sdk',
  'BASE_LIB'        : '${BASE}/lib/clang/${CLANG_VER}/lib/le32-nacl',
  'BASE_USR_ARCH'   : '${BASE_USR_%BCLIB_ARCH%}',
  'BASE_USR_X8632'  : '${BASE}/i686_bc-nacl',
  'BASE_USR_X8664'  : '${BASE}/x86_64_bc-nacl',
  'BASE_USR_ARM'    : '${BASE}/arm_bc-nacl',
  'BASE_LIB_ARCH'   : '${BASE_LIB_%BCLIB_ARCH%}',
  'BASE_LIB_X8632'  : '${BASE}/lib/clang/${CLANG_VER}/lib/i686_bc-nacl',
  'BASE_LIB_X8664'  : '${BASE}/lib/clang/${CLANG_VER}/lib/x86_64_bc-nacl',
  'BASE_LIB_ARM'    : '${BASE}/lib/clang/${CLANG_VER}/lib/arm_bc-nacl',

  'LIBS_NATIVE_ARCH'         : '${LIBS_NATIVE_%ARCH%}',
  'LIBS_NATIVE_ARM'          : '${BASE_LIB_NATIVE}arm/lib',
  'LIBS_NATIVE_ARM_NONSFI'   : '${BASE_LIB_NATIVE}arm-nonsfi/lib',
  'LIBS_NATIVE_X8632'        : '${BASE_LIB_NATIVE}x86-32/lib',
  'LIBS_NATIVE_X8632_NONSFI' : '${BASE_LIB_NATIVE}x86-32-nonsfi/lib',
  'LIBS_NATIVE_X8664'        : '${BASE_LIB_NATIVE}x86-64/lib',
  'LIBS_NATIVE_MIPS32'       : '${BASE_LIB_NATIVE}mips32/lib',

  'BASE_LLVM_BIN'   : '${BASE_LLVM}/bin',
  'TRANSLATOR_BIN'  :
    '${BASE_TOOLCHAIN}/pnacl_translator/translator/${TRANSLATOR_ARCH}/bin',

  # TODO(dschuff): Switch these directories to be triple-style arches,
  # to match the main toolchain?
  'TRANSLATOR_ARCH'       : '${TRANSLATOR_ARCH_%ARCH%}',
  'TRANSLATOR_ARCH_X8632' : 'x86-32',
  'TRANSLATOR_ARCH_X8664' : 'x86-64',
  'TRANSLATOR_ARCH_ARM'   : 'arm',
  'TRANSLATOR_ARCH_MIPS32': 'mips32',

  'SCONS_OUT'       : '${BASE_NACL}/scons-out',

  # Driver settings
  'ARCH'        : '', # Target architecture, including optional
                      # suffixes such as '_NONSFI' or '_LINUX'.
  'BASE_ARCH'   : '', # Target architecture without any '_NONSFI' suffix.
                      # Derived from ARCH field.
  'NONSFI_NACL' : '0', # Whether targeting Non-SFI Mode.  Derived from
                       # ARCH field.
  'BIAS'        : 'NONE', # This can be 'NONE', 'ARM', 'MIPS32', 'X8632' or
                          # 'X8664'.
                          # When not set to none, this causes the front-end to
                          # act like a target-specific compiler. This bias is
                          # currently needed while compiling newlib,
                          # and some scons tests.
  'DRY_RUN'     : '0',
  'SAVE_TEMPS'  : '0', # Do not clean up temporary files
  'SANDBOXED'   : '0', # Use sandboxed toolchain for this arch. (main switch)
  'HAS_FRONTEND': '', # Set by ReadConfig().  '1' if the driver install
                      # has support for front-end bitcode tools, or '0'
                      # if it only has the backend translator.

  'USE_EMULATOR'        : '0',
  # Args passed from one driver invocation to another
  'INHERITED_DRIVER_ARGS' : '',

  'BCLIB_ARCH'          : '',

  # Logging settings
  'LOG_VERBOSE'        : '0', # Log to stdout (--pnacl-driver-verbose)

  # Conventions
  'SO_EXT'          : '${SO_EXT_%BUILD_OS%}',
  'SO_EXT_darwin'   : '.dylib',
  'SO_EXT_linux'    : '.so',
  'SO_EXT_nacl'     : '.so',
  'SO_EXT_windows'  : '.dll',

  'SO_DIR'          : '${SO_DIR_%BUILD_OS%}',
  'SO_DIR_darwin'   : 'lib',
  'SO_DIR_linux'    : 'lib',
  'SO_DIR_nacl'     : 'lib',
  'SO_DIR_windows'  : 'bin', # On Windows, DLLs are placed in bin/
                             # because the dynamic loader searches %PATH%

  'EXEC_EXT'        : '${EXEC_EXT_%BUILD_OS%}',
  'EXEC_EXT_darwin' : '',
  'EXEC_EXT_linux'  : '',
  'EXEC_EXT_nacl'   : '',
  'EXEC_EXT_windows': '.exe',

  'SCONS_OS'          : '${SCONS_OS_%BUILD_OS%}',
  'SCONS_OS_linux'    : 'linux',
  'SCONS_OS_nacl'     : 'nacl',
  'SCONS_OS_darwin'   : 'mac',
  'SCONS_OS_windows'  : 'win',

  # llvm goldplugin
  'GOLD_PLUGIN_SO'  : '${BASE_LLVM}/${SO_DIR}/LLVMgold${SO_EXT}',

  'SCONS_STAGING'       : '${SCONS_STAGING_%ARCH%}',
  'SCONS_STAGING_X8632' : '${SCONS_OUT}/opt-${SCONS_OS}-x86-32/staging',
  'SCONS_STAGING_X8664' : '${SCONS_OUT}/opt-${SCONS_OS}-x86-64/staging',
  'SCONS_STAGING_ARM'   : '${SCONS_OUT}/opt-${SCONS_OS}-arm/staging',
  'SCONS_STAGING_MIPS32': '${SCONS_OUT}/opt-${SCONS_OS}-mips32/staging',

  'SEL_LDR_PREFIX' : '${USE_EMULATOR ? ${EMULATOR}}',
  # NOTE: -Q skips sel_ldr qualification tests, -c -c skips validation
  'SEL_LDR_FLAGS' : '-B ${IRT_BLOB} ' +
                    '${USE_EMULATOR ? -Q -c -c}',

  'IRT_STAGING'        : '${IRT_STAGING_%ARCH%}',
  'IRT_STAGING_X8632'  : '${SCONS_OUT}/nacl_irt-x86-32/staging',
  'IRT_STAGING_X8664'  : '${SCONS_OUT}/nacl_irt-x86-64/staging',
  'IRT_STAGING_ARM'    : '${SCONS_OUT}/nacl_irt-arm/staging',
  'IRT_STAGING_MIPS32' : '${SCONS_OUT}/nacl_irt-mips32/staging',
  'IRT_BLOB'           : '${IRT_STAGING}/irt_core.nexe',

  'EMULATOR'              : '${EMULATOR_%ARCH%}',
  'EMULATOR_X8632'        : '',
  'EMULATOR_X8664'        : '',
  # NOTE: this is currently the only dependency on the arm trusted TC
  'EMULATOR_ARM'          :
      '${BASE_NACL}/toolchain/linux_x86/arm_trusted/run_under_qemu_arm',
  'EMULATOR_MIPS32'       :
      '${BASE_NACL}/toolchain/linux_x86/mips_trusted/run_under_qemu_mips32',

  'SEL_LDR'       : '${SCONS_STAGING}/sel_ldr${EXEC_EXT}',
  'BOOTSTRAP_LDR' : '${SCONS_STAGING}/nacl_helper_bootstrap${EXEC_EXT}',

  # sandboxed LLVM backend
  'LLC_SB'      : '${TRANSLATOR_BIN}/pnacl-llc.nexe',
  # sandboxed linker (gold based)
  'LD_SB'       : '${TRANSLATOR_BIN}/ld.nexe',
  # sandboxed Subzero backend
  'PNACL_SZ_SB' : '${TRANSLATOR_BIN}/pnacl-sz.nexe',

  # Bitcode LLVM tools
  'CLANG'           : '${BASE_LLVM_BIN}/clang${EXEC_EXT}',
  # 'clang++' doesn't work on Windows (outside of Cygwin),
  # because it is a symlink.
  'CLANGXX'         : '${BASE_LLVM_BIN}/clang${EXEC_EXT} --driver-mode=g++',
  'LLVM_OPT'        : '${BASE_LLVM_BIN}/opt${EXEC_EXT}',
  'LLVM_DIS'        : '${BASE_LLVM_BIN}/llvm-dis${EXEC_EXT}',
  'LLVM_NM'         : '${BASE_LLVM_BIN}/llvm-nm${EXEC_EXT}',
  # llvm-as compiles llvm assembly (.ll) to bitcode (.bc/.po)
  'LLVM_AS'         : '${BASE_LLVM_BIN}/llvm-as${EXEC_EXT}',
  'PNACL_ABICHECK'  : '${BASE_LLVM_BIN}/pnacl-abicheck${EXEC_EXT}',
  'PNACL_COMPRESS'  : '${BASE_LLVM_BIN}/pnacl-bccompress${EXEC_EXT}',

  # Native LLVM tools
  'LLVM_PNACL_LLC'  : '${BASE_LLVM_BIN}/pnacl-llc${EXEC_EXT}',
  'LLVM_PNACL_SZ'   : '${BASE_LLVM_BIN}/pnacl-sz${EXEC_EXT}',
  # llvm-mc is llvm's native assembler
  'LLVM_MC'         : '${BASE_LLVM_BIN}/llvm-mc${EXEC_EXT}',

  # Binutils
  'BINUTILS_BASE'  : '${BASE_BINUTILS}/bin/arm-nacl-',
  'OBJDUMP'        : '${BINUTILS_BASE}objdump${EXEC_EXT}',
  'NM'             : '${BINUTILS_BASE}nm${EXEC_EXT}',
  'AR'             : '${BINUTILS_BASE}ar${EXEC_EXT}',
  'RANLIB'         : '${BINUTILS_BASE}ranlib${EXEC_EXT}',
  'READELF'        : '${BINUTILS_BASE}readelf${EXEC_EXT}',
  'STRIP'          : '${BINUTILS_BASE}strip${EXEC_EXT}',

  # linker (used for both bitcode and ELF linking)
  'LD'        : '${BINUTILS_BASE}ld.gold${EXEC_EXT}',
}
######################################################################
#
# Environment
#
######################################################################
def ParseError(s, leftpos, rightpos, msg):
  """Report a parse error in expression |s| and abort the driver.

  Prints the offending expression with a caret marker underlining the
  span [leftpos, rightpos], then exits with status 1 via DriverExit.
  """
  Log.Error("Parse Error: %s", msg)
  Log.Error(' ' + s)
  # Caret line: spaces up to the error, then one '^' per offending char.
  Log.Error(' ' + (' '*leftpos) + ('^'*(rightpos - leftpos + 1)))
  DriverExit(1)
def FindFirst(s, pos, strset):
  """Locate the leftmost occurrence, at or after |pos|, of any entry of
  |strset| within |s|.

  Returns (matched_string, index); when nothing matches, (None, len(s)).
  For example:
    FindFirst('hello world', 0, ['h','o']) = ('h', 0)
    FindFirst('hello world', 1, ['h','o']) = ('o', 4)
    FindFirst('hello world', 0, ['x'])     = (None, 11)
  """
  best_index = len(s)
  best_match = None
  for candidate in strset:
    where = s.find(candidate, pos)
    # '<=' so a later entry found at the same spot wins, matching the
    # historical dict-overwrite behavior of this helper.
    if where != -1 and where <= best_index:
      best_index = where
      best_match = candidate
  if best_match is None:
    return (None, len(s))
  return (best_match, best_index)
class Environment(object):
  """Key/value store for the driver configuration, plus the evaluator for
  the small ${...} expression language embedded in the values.

  Values may reference other variables, ternary conditionals, boolean
  tests, and registered @functions; see the grammar comment below.
  """
  # Shared registry of @functions callable from expressions.
  functions = {}

  @classmethod
  def register(cls, func):
    """ Register a function for use in the evaluator """
    cls.functions[func.__name__] = func
    return func

  def __init__(self):
    # Stack of saved snapshots for push()/pop().
    self.stack = []
    self.reset()

  def reset(self):
    # Restore the pristine initial configuration.
    self.data = dict(INITIAL_ENV)

  def update(self, extra):
    # Merge |extra| bindings into the current environment.
    self.data.update(extra)

  def dump(self):
    # Debugging aid: print every raw (unevaluated) binding.
    # NOTE: iteritems() is a Python 2 idiom; this file targets Python 2.
    for (k,v) in self.data.iteritems():
      print('%s == %s' % (k, v))

  def push(self):
    # Snapshot the current bindings; pop() restores them.
    self.stack.append(self.data)
    self.data = dict(self.data) # Make a copy

  def pop(self):
    self.data = self.stack.pop()

  def has(self, varname):
    # True if |varname| is bound (does not evaluate it).
    return varname in self.data

  def getraw(self, varname):
    # Evaluate the stored expression, but don't split into shell terms.
    return self.eval(self.data[varname])

  # Evaluate a variable from the environment.
  # Returns a list of terms.
  def get(self, varname):
    return shell.split(self.getraw(varname))

  # Retrieve a variable from the environment which
  # is a single term. Returns a string.
  def getone(self, varname):
    return shell.unescape(self.getraw(varname))

  def getbool(self, varname):
    # Booleans are stored as the strings '0'/'1'.
    return bool(int(self.getone(varname)))

  def setbool(self, varname, val):
    if val:
      self.set(varname, '1')
    else:
      self.set(varname, '0')

  # Set a variable in the environment without shell-escape
  def setraw(self, varname, val):
    self.data[varname] = val

  # Set one or more variables using named arguments
  def setmany(self, **kwargs):
    # NOTE: types.StringTypes / types.ListType are Python 2 only.
    for k,v in kwargs.iteritems():
      if isinstance(v, types.StringTypes):
        self.set(k, v)
      elif isinstance(v, types.ListType):
        self.set(k, *v)
      else:
        Log.Fatal('env.setmany given a non-string and non-list value')

  def clear(self, varname):
    self.data[varname] = ''

  # Set a variable to one or more terms, applying shell-escape.
  def set(self, varname, *vals):
    self.clear(varname)
    self.append(varname, *vals)

  # Append one or more terms to a variable in the
  # environment, applying shell-escape.
  def append(self, varname, *vals):
    escaped = [ shell.escape(v) for v in vals ]
    if len(self.data[varname]) > 0:
      self.data[varname] += ' '
    self.data[varname] += ' '.join(escaped)

  # Evaluate an expression s
  def eval(self, s):
    (result, i) = self.eval_expr(s, 0, [])
    # The whole string must have been consumed by the parse.
    assert(i == len(s))
    return result

  ######################################################################
  # EXPRESSION EVALUATION CODE
  # Context Free Grammar:
  #
  # str = empty | string literal
  # expr = str | expr '$' '{' bracket_expr '}' expr
  # bracket_expr = varname | boolexpr ? expr | boolexpr ? expr : expr | @call
  # boolexpr = boolval | boolval '&&' boolexpr | boolval '||' boolexpr
  # boolval = varname | !varname | #varname | !#varname | varname '==' str
  # varname = str | varname '%' bracket_expr '%' varname
  # call = func | func ':' arglist
  # func = str
  # arglist = empty | arg ':' arglist
  #
  # Do not call these functions outside of this class.
  # The env.eval method is the external interface to the evaluator.
  ######################################################################

  # Evaluate a string literal
  def eval_str(self, s, pos, terminators):
    (_,i) = FindFirst(s, pos, terminators)
    return (s[pos:i], i)

  # Evaluate %var% substitutions inside a variable name.
  # Returns (the_actual_variable_name, endpos)
  # Terminated by } character
  def eval_varname(self, s, pos, terminators):
    (_,i) = FindFirst(s, pos, ['%'] + terminators)
    leftpart = s[pos:i].strip(' ')
    if i == len(s) or s[i] in terminators:
      return (leftpart, i)
    # A '%' was hit: evaluate the embedded expression, then recurse for
    # whatever follows the closing '%'.
    (middlepart, j) = self.eval_bracket_expr(s, i+1, ['%'])
    if j == len(s) or s[j] != '%':
      ParseError(s, i, j, "Unterminated %")
    (rightpart, k) = self.eval_varname(s, j+1, terminators)
    fullname = leftpart + middlepart + rightpart
    fullname = fullname.strip()
    return (fullname, k)

  # Absorb whitespace
  def eval_whitespace(self, s, pos):
    i = pos
    while i < len(s) and s[i] == ' ':
      i += 1
    return (None, i)

  def eval_bool_val(self, s, pos, terminators):
    """Evaluate one boolval production: [!][#]varname [== literal]."""
    (_,i) = self.eval_whitespace(s, pos)
    if s[i] == '!':
      negated = True
      i += 1
    else:
      negated = False
    (_,i) = self.eval_whitespace(s, i)
    if s[i] == '#':
      # '#var' tests non-emptiness of var instead of its boolean value.
      uselen = True
      i += 1
    else:
      uselen = False
    (varname, j) = self.eval_varname(s, i, ['=']+terminators)
    if j == len(s):
      # This is an error condition one level up. Don't evaluate anything.
      return (False, j)
    if varname not in self.data:
      ParseError(s, i, j, "Undefined variable '%s'" % varname)
    vardata = self.data[varname]
    contents = self.eval(vardata)
    if s[j] == '=':
      # String equality test
      if j+1 == len(s) or s[j+1] != '=':
        ParseError(s, j, j, "Unexpected token")
      if uselen:
        ParseError(s, j, j, "Cannot combine == and #")
      (_,j) = self.eval_whitespace(s, j+2)
      (literal_str,j) = self.eval_str(s, j, [' ']+terminators)
      (_,j) = self.eval_whitespace(s, j)
      if j == len(s):
        return (False, j) # Error one level up
    else:
      literal_str = None
    if uselen:
      val = (len(contents) != 0)
    elif literal_str is not None:
      val = (contents == literal_str)
    else:
      if contents not in ('0','1'):
        ParseError(s, j, j,
                   "%s evaluated to %s, which is not a boolean!" % (varname, contents))
      val = bool(int(contents))
    return (negated ^ val, j)

  # Evaluate a boolexpr
  def eval_bool_expr(self, s, pos, terminators):
    (boolval1, i) = self.eval_bool_val(s, pos, ['&','|']+terminators)
    if i == len(s):
      # This is an error condition one level up. Don't evaluate anything.
      return (False, i)
    if s[i] in ('&','|'):
      # and/or expression
      if i+1 == len(s) or s[i+1] != s[i]:
        ParseError(s, i, i, "Unexpected token")
      is_and = (s[i] == '&')
      # NOTE: both sides are evaluated (no short-circuiting).
      (boolval2, j) = self.eval_bool_expr(s, i+2, terminators)
      if j == len(s):
        # This is an error condition one level up.
        return (False, j)
      if is_and:
        return (boolval1 and boolval2, j)
      else:
        return (boolval1 or boolval2, j)
    return (boolval1, i)

  # Evaluate the inside of a ${} or %%.
  # Returns the (the_evaluated_string, endpos)
  def eval_bracket_expr(self, s, pos, terminators):
    (_,pos) = self.eval_whitespace(s, pos)
    if s[pos] == '@':
      # Function call: ${@func}
      # or possibly : ${@func:arg1:arg2...}
      (_,i) = FindFirst(s, pos, [':']+terminators)
      if i == len(s):
        return ('', i) # Error one level up
      funcname = s[pos+1:i]
      if s[i] != ':':
        j = i
        args = []
      else:
        (_,j) = FindFirst(s, i+1, terminators)
        if j == len(s):
          return ('', j) # Error one level up
        args = s[i+1:j].split(':')
      # Call the registered function, then re-evaluate its result so
      # returned values may themselves contain ${...} references.
      val = self.functions[funcname](*args)
      contents = self.eval(val)
      return (contents, j)
    (m,_) = FindFirst(s, pos, ['?']+terminators)
    if m != '?':
      # Regular variable substitution
      (varname,i) = self.eval_varname(s, pos, terminators)
      if len(s) == i:
        return ('', i) # Error one level up
      if varname not in self.data:
        ParseError(s, pos, i, "Undefined variable '%s'" % varname)
      vardata = self.data[varname]
      contents = self.eval(vardata)
      return (contents, i)
    else:
      # Ternary Mode
      (is_cond_true,i) = self.eval_bool_expr(s, pos, ['?']+terminators)
      assert(i < len(s) and s[i] == '?')
      (if_true_expr, j) = self.eval_expr(s, i+1, [' : ']+terminators)
      if j == len(s):
        return ('', j) # Error one level up
      if s[j:j+3] == ' : ':
        (if_false_expr,j) = self.eval_expr(s, j+3, terminators)
        if j == len(s):
          # This is an error condition one level up.
          return ('', j)
      else:
        # No else-branch: the false case evaluates to the empty string.
        if_false_expr = ''
      if is_cond_true:
        contents = if_true_expr.strip()
      else:
        contents = if_false_expr.strip()
      return (contents, j)

  # Evaluate an expression with ${} in string s, starting at pos.
  # Returns (the_evaluated_expression, endpos)
  def eval_expr(self, s, pos, terminators):
    (m,i) = FindFirst(s, pos, ['${'] + terminators)
    leftpart = s[pos:i]
    if i == len(s) or m in terminators:
      return (leftpart, i)
    (middlepart, j) = self.eval_bracket_expr(s, i+2, ['}'])
    if j == len(s) or s[j] != '}':
      ParseError(s, i, j, 'Unterminated ${')
    (rightpart, k) = self.eval_expr(s, j+1, terminators)
    return (leftpart + middlepart + rightpart, k)
env = Environment()
def override_env(meth_name, func):
  """Override a method in the global |env|, given the method name
  and the new function.
  """
  global env
  # Python 2 three-argument MethodType: bind |func| as an instance method
  # of the existing |env| object.
  setattr(env, meth_name, types.MethodType(func, env, Environment))
| bsd-3-clause |
pekeler/arangodb | 3rdParty/V8-4.3.61/build/gyp/test/make/gyptest-noload.py | 362 | 2023 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Tests the use of the NO_LOAD flag which makes loading sub .mk files
optional.
"""

# Python 2.5 needs this for the with statement.
from __future__ import with_statement

import os
import TestGyp

test = TestGyp.TestGyp(formats=['make'])

test.run_gyp('all.gyp', chdir='noload')

test.relocate('noload', 'relocate/noload')

# Full build first: all sub .mk files are loaded and everything compiles.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Just sanity test that NO_LOAD=lib doesn't break anything.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')
# NO_LOAD with a name that matches nothing should also be harmless.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=z'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Make sure we can rebuild without reloading the sub .mk file.
with open('relocate/noload/main.c', 'a') as src_file:
  src_file.write("\n")
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

# Change shared.c, but verify that it doesn't get rebuild if we don't load it.
with open('relocate/noload/lib/shared.c', 'w') as shared_file:
  shared_file.write(
      '#include "shared.h"\n'
      'const char kSharedStr[] = "modified";\n'
  )
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
           arguments=['NO_LOAD=lib'])
# Output is still the old string because lib/ was not reloaded or rebuilt.
test.run_built_executable('exe', chdir='relocate/noload',
                          stdout='Hello from shared.c.\n')

test.pass_test()
| apache-2.0 |
chen0510566/MissionPlanner | Lib/site-packages/numpy/doc/structured_arrays.py | 58 | 7222 | """
=====================================
Structured Arrays (aka Record Arrays)
=====================================
Introduction
============
Numpy provides powerful capabilities to create arrays of structs or records.
These arrays permit one to manipulate the data by the structs or by fields of
the struct. A simple example will show what is meant.: ::
>>> x = np.zeros((2,),dtype=('i4,f4,a10'))
>>> x[:] = [(1,2.,'Hello'),(2,3.,"World")]
>>> x
array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
Here we have created a one-dimensional array of length 2. Each element of
this array is a record that contains three items, a 32-bit integer, a 32-bit
float, and a string of length 10 or less. If we index this array at the second
position we get the second record: ::
>>> x[1]
(2,3.,"World")
Conveniently, one can access any field of the array by indexing using the
string that names that field. In this case the fields have received the
default names 'f0', 'f1' and 'f2'.
>>> y = x['f1']
>>> y
array([ 2., 3.], dtype=float32)
>>> y[:] = 2*y
>>> y
array([ 4., 6.], dtype=float32)
>>> x
array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
In these examples, y is a simple float array consisting of the 2nd field
in the record. But, rather than being a copy of the data in the structured
array, it is a view, i.e., it shares exactly the same memory locations.
Thus, when we updated this array by doubling its values, the structured
array shows the corresponding values as doubled as well. Likewise, if one
changes the record, the field view also changes: ::
>>> x[1] = (-1,-1.,"Master")
>>> x
array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
dtype=[('f0', '>i4'), ('f1', '>f4'), ('f2', '|S10')])
>>> y
array([ 4., -1.], dtype=float32)
Defining Structured Arrays
==========================
One defines a structured array through the dtype object. There are
**several** alternative ways to define the fields of a record. Some of
these variants provide backward compatibility with Numeric, numarray, or
another module, and should not be used except for such purposes. These
will be so noted. One specifies record structure in
one of four alternative ways, using an argument (as supplied to a dtype
function keyword or a dtype object constructor itself). This
argument must be one of the following: 1) string, 2) tuple, 3) list, or
4) dictionary. Each of these is briefly described below.
1) String argument (as used in the above examples).
In this case, the constructor expects a comma-separated list of type
specifiers, optionally with extra shape information.
The type specifiers can take 4 different forms: ::
a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f4, f8, c8, c16, a<n>
(representing bytes, ints, unsigned ints, floats, complex and
fixed length strings of specified byte lengths)
b) int8,...,uint8,...,float32, float64, complex64, complex128
(this time with bit sizes)
c) older Numeric/numarray type specifications (e.g. Float32).
Don't use these in new code!
   d) Single character type specifiers (e.g. H for unsigned short ints).
Avoid using these unless you must. Details can be found in the
Numpy book
These different styles can be mixed within the same string (but why would you
want to do that?). Furthermore, each type specifier can be prefixed
with a repetition number, or a shape. In these cases an array
element is created, i.e., an array within a record. That array
is still referred to as a single field. An example: ::
>>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
>>> x
array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
Using strings to define the record structure precludes naming the fields
in the original definition. The names can, however, be changed later, as
shown below.
2) Tuple argument: The only relevant tuple case that applies to record
structures is when a structure is mapped to an existing data type. This
is done by pairing in a tuple, the existing data type with a matching
dtype definition (using any of the variants being described here). As
an example (using a definition using a list, so see 3) for further
details): ::
>>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
>>> x
array([0, 0, 0])
>>> x['r']
array([0, 0, 0], dtype=uint8)
In this case, an array is produced that looks and acts like a simple int32 array,
but also has definitions for fields that use only one byte of the int32 (a bit
like Fortran equivalencing).
3) List argument: In this case the record structure is defined with a list of
tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
('' is permitted), 2) the type of the field, and 3) the shape (optional).
For example:
>>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
>>> x
array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
4) Dictionary argument: two different forms are permitted. The first consists
of a dictionary with two required keys ('names' and 'formats'), each having an
equal sized list of values. The format list contains any type/shape specifier
allowed in other contexts. The names must be strings. There are two optional
keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
the required two where offsets contain integer offsets for each field, and
titles are objects containing metadata for each field (these do not have
to be strings), where the value of None is permitted. As an example: ::
>>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[('col1', '>i4'), ('col2', '>f4')])
The other dictionary form permitted is a dictionary of name keys with tuple
values specifying type, offset, and an optional title.
>>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
Accessing and modifying field names
===================================
The field names are an attribute of the dtype object defining the record structure.
For the last example: ::
>>> x.dtype.names
('col1', 'col2')
>>> x.dtype.names = ('x', 'y')
>>> x
array([(0, 0.0), (0, 0.0), (0, 0.0)],
dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
>>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
Accessing field titles
====================================
The field titles provide a standard place to put associated info for fields.
They do not have to be strings.
>>> x.dtype.fields['x'][2]
'title 1'
"""
| gpl-3.0 |
wooga/airflow | airflow/providers/google/marketing_platform/example_dags/example_search_ads.py | 5 | 2874 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use SearchAds.
"""
import os
from airflow import models
from airflow.providers.google.marketing_platform.operators.search_ads import (
GoogleSearchAdsDownloadReportOperator, GoogleSearchAdsInsertReportOperator,
)
from airflow.providers.google.marketing_platform.sensors.search_ads import GoogleSearchAdsReportSensor
from airflow.utils import dates
# [START howto_search_ads_env_variables]
AGENCY_ID = os.environ.get("GMP_AGENCY_ID")
ADVERTISER_ID = os.environ.get("GMP_ADVERTISER_ID")
GCS_BUCKET = os.environ.get("GMP_GCS_BUCKET", "test-cm-bucket")

# Report definition passed verbatim to the Search Ads Reporting API.
REPORT = {
    "reportScope": {"agencyId": AGENCY_ID, "advertiserId": ADVERTISER_ID},
    "reportType": "account",
    "columns": [{"columnName": "agency"}, {"columnName": "lastModifiedTimestamp"}],
    "includeRemovedEntities": False,
    "statisticsCurrency": "usd",
    "maxRowsPerFile": 1000000,
    "downloadFormat": "csv",
}
# [END howto_search_ads_env_variables]

default_args = {"start_date": dates.days_ago(1)}

with models.DAG(
    "example_search_ads",
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
) as dag:
    # [START howto_search_ads_generate_report_operator]
    generate_report = GoogleSearchAdsInsertReportOperator(
        report=REPORT, task_id="generate_report"
    )
    # [END howto_search_ads_generate_report_operator]

    # [START howto_search_ads_get_report_id]
    # Jinja template resolved at runtime from the generate_report task's XCom.
    report_id = "{{ task_instance.xcom_pull('generate_report', key='report_id') }}"
    # [END howto_search_ads_get_report_id]

    # [START howto_search_ads_get_report_operator]
    wait_for_report = GoogleSearchAdsReportSensor(
        report_id=report_id, task_id="wait_for_report"
    )
    # [END howto_search_ads_get_report_operator]

    # [START howto_search_ads_getfile_report_operator]
    download_report = GoogleSearchAdsDownloadReportOperator(
        report_id=report_id, bucket_name=GCS_BUCKET, task_id="download_report"
    )
    # [END howto_search_ads_getfile_report_operator]

    # Pipeline: generate the report, wait until it is ready, download to GCS.
    generate_report >> wait_for_report >> download_report
| apache-2.0 |
futurecolors/suds | suds/serviceproxy.py | 206 | 2978 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The service proxy provides access to web services.
Replaced by: L{client.Client}
"""
from logging import getLogger
from suds import *
from suds.client import Client
log = getLogger(__name__)
class ServiceProxy(object):
    """
    Thin SOAP web service proxy retained for backwards compatibility.

    Every operation is forwarded to the second generation API.
    @ivar __client__: The delegate client that does the real work.
    @type __client__: L{Client}
    @note: Deprecated, superseded by L{Client}.
    """

    def __init__(self, url, **kwargs):
        """
        @param url: The URL for the WSDL.
        @type url: str
        @param kwargs: keyword arguments.
        @keyword faults: Raise faults raised by server (default:True),
            else return tuple from service method invocation as (http code, object).
        @type faults: boolean
        @keyword proxy: An http proxy to be specified on requests (default:{}).
            The proxy is defined as {protocol:proxy,}
        @type proxy: dict
        """
        self.__client__ = Client(url, **kwargs)

    def get_instance(self, name):
        """
        Build an instance of a type declared in the WSDL.
        @param name: The name of a type defined in the WSDL.
        @type name: str
        @return: An instance on success, else None
        @rtype: L{sudsobject.Object}
        """
        return self.__client__.factory.create(name)

    def get_enum(self, name):
        """
        Build an instance of an enumeration declared in the WSDL.
        @param name: The name of a enumeration defined in the WSDL.
        @type name: str
        @return: An instance on success, else None
        @rtype: L{sudsobject.Object}
        """
        return self.__client__.factory.create(name)

    def __str__(self):
        return str(self.__client__)

    def __unicode__(self):
        return unicode(self.__client__)

    def __getattr__(self, name):
        # Dunder names are served from the instance dict; anything else is
        # assumed to be a remote service method on the wrapped client.
        if name.startswith('__') and name.endswith('__'):
            return self.__dict__[name]
        return getattr(self.__client__.service, name)
tnwhitwell/lexicon | lexicon/providers/cloudflare.py | 1 | 4914 | from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import requests
from .base import Provider as BaseProvider
logger = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Attach the Cloudflare authentication options to an argparse subparser."""
    options = (
        ("--auth-username", "specify email address used to authenticate"),
        ("--auth-token", "specify token used authenticate"),
    )
    for flag, description in options:
        subparser.add_argument(flag, help=description)
class Provider(BaseProvider):
    """Lexicon DNS provider backed by the Cloudflare v4 REST API.

    Authenticates with the account e-mail plus API token, sent as
    X-Auth-Email / X-Auth-Key headers on every request.
    """

    def __init__(self, options, engine_overrides=None):
        super(Provider, self).__init__(options, engine_overrides)
        # Resolved lazily by authenticate(); identifies the managed zone.
        self.domain_id = None
        self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudflare.com/client/v4')

    def authenticate(self):
        """Resolve the active zone matching the configured domain and store
        its id in self.domain_id; raises if zero or multiple zones match."""
        payload = self._get('/zones', {
            'name': self.options['domain'],
            'status': 'active'
        })
        if not payload['result']:
            raise Exception('No domain found')
        if len(payload['result']) > 1:
            raise Exception('Too many domains found. This should not happen')
        self.domain_id = payload['result'][0]['id']

    # Create record. If record already exists with the same content, do nothing.
    def create_record(self, type, name, content):
        data = {'type': type, 'name': self._full_name(name), 'content': content}
        if self.options.get('ttl'):
            data['ttl'] = self.options.get('ttl')
        payload = {'success': True}
        try:
            payload = self._post('/zones/{0}/dns_records'.format(self.domain_id), data)
        except requests.exceptions.HTTPError as err:
            # Cloudflare error 81057 = "record already exists"; treat as success.
            already_exists = next((True for error in err.response.json()['errors'] if error['code'] == 81057), False)
            if not already_exists:
                raise
        logger.debug('create_record: %s', payload['success'])
        return payload['success']

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, type=None, name=None, content=None):
        filter = {'per_page': 100}
        if type:
            filter['type'] = type
        if name:
            filter['name'] = self._full_name(name)
        if content:
            filter['content'] = content
        payload = self._get('/zones/{0}/dns_records'.format(self.domain_id), filter)
        records = []
        for record in payload['result']:
            # Normalize the API response into lexicon's record shape.
            processed_record = {
                'type': record['type'],
                'name': record['name'],
                'ttl': record['ttl'],
                'content': record['content'],
                'id': record['id']
            }
            records.append(processed_record)
        logger.debug('list_records: %s', records)
        return records

    # Create or update a record.
    def update_record(self, identifier, type=None, name=None, content=None):
        data = {}
        if type:
            data['type'] = type
        if name:
            data['name'] = self._full_name(name)
        if content:
            data['content'] = content
        if self.options.get('ttl'):
            data['ttl'] = self.options.get('ttl')
        payload = self._put('/zones/{0}/dns_records/{1}'.format(self.domain_id, identifier), data)
        logger.debug('update_record: %s', payload['success'])
        return payload['success']

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            # No id given: resolve matching records and delete all of them.
            records = self.list_records(type, name, content)
            delete_record_id = [record['id'] for record in records]
        else:
            delete_record_id.append(identifier)
        logger.debug('delete_records: %s', delete_record_id)
        for record_id in delete_record_id:
            payload = self._delete('/zones/{0}/dns_records/{1}'.format(self.domain_id, record_id))
        logger.debug('delete_record: %s', True)
        return True

    # Helpers
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """Perform an authenticated HTTP request against the Cloudflare API
        and return the decoded JSON body; raises on any HTTP error status."""
        if data is None:
            data = {}
        if query_params is None:
            query_params = {}
        r = requests.request(action, self.api_endpoint + url, params=query_params,
                             data=json.dumps(data),
                             headers={
                                 'X-Auth-Email': self.options['auth_username'],
                                 'X-Auth-Key': self.options.get('auth_token'),
                                 'Content-Type': 'application/json'
                             })
        r.raise_for_status()  # if the request fails for any reason, throw an error.
        return r.json()
| mit |
red-hood/calendarserver | contrib/performance/benchmarks/find_events.py | 1 | 3184 | ##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, gatherResults
from twisted.internet.task import cooperate
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import MULTI_STATUS
from contrib.performance.httpauth import AuthHandlerAgent
from contrib.performance.httpclient import StringProducer
from contrib.performance.benchlib import CalDAVAccount, sample
from contrib.performance.benchmarks.event import makeEvent
PROPFIND = """\
<?xml version="1.0" encoding="utf-8"?>
<x0:propfind xmlns:x0="DAV:" xmlns:x1="http://calendarserver.org/ns/">
<x0:prop>
<x0:getetag/>
<x0:resourcetype/>
<x1:notificationtype/>
</x0:prop>
</x0:propfind>
"""
def uploadEvents(numEvents, agent, uri, cal):
    """PUT numEvents generated iCalendar events into the collection *cal*
    on the server at *uri*, using three cooperating tasks that share one
    generator (so each event is uploaded exactly once)."""
    def worker():
        for i in range(numEvents):
            event = makeEvent(i, 1, 0)
            yield agent.request(
                'PUT',
                '%s%s%d.ics' % (uri, cal, i),
                Headers({"content-type": ["text/calendar"]}),
                StringProducer(event))
    worker = worker()
    # Three cooperators pull from the same generator, interleaving uploads.
    return gatherResults([
        cooperate(worker).whenDone() for _ignore_i in range(3)])
@inlineCallbacks
def measure(host, port, dtrace, numEvents, samples):
    """Benchmark depth:1 PROPFIND against a calendar pre-populated with
    numEvents events; returns the collected samples.

    The calendar is created, filled, sampled, and then deleted so the
    server is left roughly in its original state.
    """
    user = password = "user11"
    root = "/"
    principal = "/"
    uri = "http://%s:%d/" % (host, port)

    authinfo = HTTPDigestAuthHandler()
    authinfo.add_password(
        realm="Test Realm",
        uri=uri,
        user=user,
        passwd=password)
    agent = AuthHandlerAgent(Agent(reactor), authinfo)

    # Create the number of calendars necessary
    account = CalDAVAccount(
        agent,
        "%s:%d" % (host, port),
        user=user, password=password,
        root=root, principal=principal)
    cal = "calendars/users/%s/find-events/" % (user,)
    yield account.makeCalendar("/" + cal)

    # Create the indicated number of events on the calendar
    yield uploadEvents(numEvents, agent, uri, cal)

    body = StringProducer(PROPFIND)
    # Infinite generator of identical PROPFIND request tuples for sample().
    params = (
        ('PROPFIND',
         '%scalendars/__uids__/%s/find-events/' % (uri, user),
         Headers({"depth": ["1"], "content-type": ["text/xml"]}), body)
        for i in count(1))
    samples = yield sample(dtrace, samples, agent, params.next, MULTI_STATUS)

    # Delete the calendar we created to leave the server in roughly
    # the same state as we found it.
    yield account.deleteResource("/" + cal)

    returnValue(samples)
| apache-2.0 |
gnu3ra/SCC15HPCRepast | INSTALLATION/boost_1_54_0/libs/python/pyste/src/Pyste/utils.py | 54 | 2614 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from __future__ import generators
import string
import sys
#==============================================================================
# enumerate
#==============================================================================
def enumerate(seq):
    '''Compatibility reimplementation of the builtin, yielding
    (index, element) pairs for each element of *seq*.'''
    index = 0
    for element in seq:
        yield index, element
        index += 1
#==============================================================================
# makeid
#==============================================================================
# Identity map of the characters allowed in an identifier; used below for
# fast membership tests.
_valid_chars = string.ascii_letters + string.digits + '_'
_valid_chars = dict(zip(_valid_chars, _valid_chars))

def makeid(name):
    '''Returns the name as a valid identifier.

    Every character outside [A-Za-z0-9_] becomes an underscore, and runs
    of underscores are collapsed (leading/trailing ones are dropped).
    '''
    if not isinstance(name, str):
        # Debug aid: makeid is only expected to receive strings.
        # (Was a Python 2 print statement, which is a SyntaxError on
        # Python 3; the function-call form works on both.)
        print(type(name), name)
    newname = []
    for char in name:
        if char not in _valid_chars:
            char = '_'
        newname.append(char)
    newname = ''.join(newname)
    # avoid duplications of '_' chars
    names = [x for x in newname.split('_') if x]
    return '_'.join(names)
#==============================================================================
# remove_duplicated_lines
#==============================================================================
def remove_duplicated_lines(text):
    '''Return *text* with duplicate lines removed; the surviving lines
    are sorted and rejoined with newlines (original order is not kept).
    '''
    # A set gives the dedup; sorted() works on both Python 2 and 3,
    # unlike the previous dict.keys().sort(), which fails on Python 3
    # because keys() returns a view with no sort() method.
    return '\n'.join(sorted(set(text.splitlines())))
#==============================================================================
# left_equals
#==============================================================================
def left_equals(s):
    '''Render *s* as a "// ... " comment banner padded with '=' out to
    column 80, newline-terminated.'''
    banner = '// %s ' % s
    return banner.ljust(80, '=') + '\n'
#==============================================================================
# post_mortem
#==============================================================================
def post_mortem():
    '''Install a sys.excepthook that drops into pdb post-mortem debugging
    on any uncaught exception, but only when running non-interactively on
    a real terminal.'''
    def info(type, value, tb):
        if hasattr(sys, 'ps1') or not sys.stderr.isatty():
            # we are in interactive mode or we don't have a tty-like
            # device, so we call the default hook
            sys.__excepthook__(type, value, tb)
        else:
            import traceback, pdb
            # we are NOT in interactive mode, print the exception...
            traceback.print_exception(type, value, tb)
            print
            # ...then start the debugger in post-mortem mode.
            pdb.pm()
    sys.excepthook = info
| bsd-3-clause |
honnibal/spaCy | spacy/lookups.py | 1 | 8880 | # coding: utf-8
from __future__ import unicode_literals
import srsly
from collections import OrderedDict
from preshed.bloom import BloomFilter
from .errors import Errors
from .util import SimpleFrozenDict, ensure_path
from .strings import get_string_id
UNSET = object()  # sentinel default: lets callers pass default=None and have it honored


class Lookups(object):
    """Container for large lookup tables and dictionaries, e.g. lemmatization
    data or tokenizer exception lists. Lookups are available via vocab.lookups,
    so they can be accessed before the pipeline components are applied (e.g.
    in the tokenizer and lemmatizer), as well as within the pipeline components
    via doc.vocab.lookups.
    """

    def __init__(self):
        """Initialize the Lookups object.

        RETURNS (Lookups): The newly created object.

        DOCS: https://spacy.io/api/lookups#init
        """
        self._tables = OrderedDict()

    def __contains__(self, name):
        """Check if the lookups contain a table of a given name. Delegates to
        Lookups.has_table.

        name (unicode): Name of the table.
        RETURNS (bool): Whether a table of that name is in the lookups.
        """
        return self.has_table(name)

    def __len__(self):
        """RETURNS (int): The number of tables in the lookups."""
        return len(self._tables)

    @property
    def tables(self):
        """RETURNS (list): Names of all tables in the lookups."""
        return list(self._tables.keys())

    def add_table(self, name, data=SimpleFrozenDict()):
        """Add a new table to the lookups. Raises an error if the table exists.

        name (unicode): Unique name of table.
        data (dict): Optional data to add to the table.
        RETURNS (Table): The newly added table.

        DOCS: https://spacy.io/api/lookups#add_table
        """
        if name in self.tables:
            raise ValueError(Errors.E158.format(name=name))
        table = Table(name=name, data=data)
        self._tables[name] = table
        return table

    def get_table(self, name, default=UNSET):
        """Get a table. Raises an error if the table doesn't exist and no
        default value is provided.

        name (unicode): Name of the table.
        default: Optional default value to return if table doesn't exist.
        RETURNS (Table): The table.

        DOCS: https://spacy.io/api/lookups#get_table
        """
        if name not in self._tables:
            # BUGFIX: compare the sentinel by identity, not equality.
            # With "==", a caller-supplied default with a permissive
            # __eq__ (e.g. a numpy array) could be mistaken for UNSET.
            if default is UNSET:
                raise KeyError(Errors.E159.format(name=name, tables=self.tables))
            return default
        return self._tables[name]

    def remove_table(self, name):
        """Remove a table. Raises an error if the table doesn't exist.

        name (unicode): Name of the table to remove.
        RETURNS (Table): The removed table.

        DOCS: https://spacy.io/api/lookups#remove_table
        """
        if name not in self._tables:
            raise KeyError(Errors.E159.format(name=name, tables=self.tables))
        return self._tables.pop(name)

    def has_table(self, name):
        """Check if the lookups contain a table of a given name.

        name (unicode): Name of the table.
        RETURNS (bool): Whether a table of that name exists.

        DOCS: https://spacy.io/api/lookups#has_table
        """
        return name in self._tables

    def to_bytes(self, **kwargs):
        """Serialize the lookups to a bytestring.

        RETURNS (bytes): The serialized Lookups.

        DOCS: https://spacy.io/api/lookups#to_bytes
        """
        return srsly.msgpack_dumps(self._tables)

    def from_bytes(self, bytes_data, **kwargs):
        """Load the lookups from a bytestring.

        bytes_data (bytes): The data to load.
        RETURNS (Lookups): The loaded Lookups.

        DOCS: https://spacy.io/api/lookups#from_bytes
        """
        self._tables = OrderedDict()
        for key, value in srsly.msgpack_loads(bytes_data).items():
            # Rebuild each Table from scratch so its Bloom filter is
            # repopulated by Table.__setitem__ during update().
            self._tables[key] = Table(key)
            self._tables[key].update(value)
        return self

    def to_disk(self, path, **kwargs):
        """Save the lookups to a directory as lookups.bin. Expects a path to a
        directory, which will be created if it doesn't exist.

        path (unicode / Path): The file path.

        DOCS: https://spacy.io/api/lookups#to_disk
        """
        # Nothing is written when there are no tables (no empty file).
        if len(self._tables):
            path = ensure_path(path)
            if not path.exists():
                path.mkdir()
            filepath = path / "lookups.bin"
            with filepath.open("wb") as file_:
                file_.write(self.to_bytes())

    def from_disk(self, path, **kwargs):
        """Load lookups from a directory containing a lookups.bin. Will skip
        loading if the file doesn't exist.

        path (unicode / Path): The directory path.
        RETURNS (Lookups): The loaded lookups.

        DOCS: https://spacy.io/api/lookups#from_disk
        """
        path = ensure_path(path)
        filepath = path / "lookups.bin"
        if filepath.exists():
            with filepath.open("rb") as file_:
                data = file_.read()
            return self.from_bytes(data)
        return self
class Table(OrderedDict):
    """A table in the lookups. Subclass of builtin dict that implements a
    slightly more consistent and unified API.

    Includes a Bloom filter to speed up missed lookups: keys are hashed to
    IDs on the way in, and the filter lets __contains__ reject most absent
    keys without touching the dict.
    """

    @classmethod
    def from_dict(cls, data, name=None):
        """Initialize a new table from a dict.

        data (dict): The dictionary.
        name (unicode): Optional table name for reference.
        RETURNS (Table): The newly created object.

        DOCS: https://spacy.io/api/lookups#table.from_dict
        """
        self = cls(name=name)
        self.update(data)
        return self

    def __init__(self, name=None, data=None):
        """Initialize a new table.

        name (unicode): Optional table name for reference.
        data (dict): Initial data, used to hint Bloom Filter.
        RETURNS (Table): The newly created object.

        DOCS: https://spacy.io/api/lookups#table.init
        """
        OrderedDict.__init__(self)
        self.name = name
        # Assume a default size of 1M items
        self.default_size = 1e6
        # Size the Bloom filter from the initial data when available,
        # otherwise from the default estimate above.
        size = len(data) if data and len(data) > 0 else self.default_size
        # NOTE(review): the expected *item count* is passed to
        # from_error_rate here -- confirm against the preshed BloomFilter
        # API that this argument is interpreted as intended.
        self.bloom = BloomFilter.from_error_rate(size)
        if data:
            self.update(data)

    def __setitem__(self, key, value):
        """Set new key/value pair. String keys will be hashed.

        key (unicode / int): The key to set.
        value: The value to set.
        """
        key = get_string_id(key)
        OrderedDict.__setitem__(self, key, value)
        # Record the hashed key so __contains__ can fast-reject misses.
        self.bloom.add(key)

    def set(self, key, value):
        """Set new key/value pair. String keys will be hashed.
        Same as table[key] = value.

        key (unicode / int): The key to set.
        value: The value to set.
        """
        self[key] = value

    def __getitem__(self, key):
        """Get the value for a given key. String keys will be hashed.

        key (unicode / int): The key to get.
        RETURNS: The value.
        """
        key = get_string_id(key)
        return OrderedDict.__getitem__(self, key)

    def get(self, key, default=None):
        """Get the value for a given key. String keys will be hashed.

        key (unicode / int): The key to get.
        default: The default value to return.
        RETURNS: The value.
        """
        key = get_string_id(key)
        return OrderedDict.get(self, key, default)

    def __contains__(self, key):
        """Check whether a key is in the table. String keys will be hashed.

        key (unicode / int): The key to check.
        RETURNS (bool): Whether the key is in the table.
        """
        key = get_string_id(key)
        # The Bloom filter is definitive only for absence: a negative means
        # the key was never added, while a positive can be a false positive
        # and must be confirmed against the real dict afterwards.
        if key not in self.bloom:
            return False
        return OrderedDict.__contains__(self, key)

    def to_bytes(self):
        """Serialize table to a bytestring.

        RETURNS (bytes): The serialized table.

        DOCS: https://spacy.io/api/lookups#table.to_bytes
        """
        data = [
            ("name", self.name),
            ("dict", dict(self.items())),
            ("bloom", self.bloom.to_bytes()),
        ]
        return srsly.msgpack_dumps(OrderedDict(data))

    def from_bytes(self, bytes_data):
        """Load a table from a bytestring.

        bytes_data (bytes): The data to load.
        RETURNS (Table): The loaded table.

        DOCS: https://spacy.io/api/lookups#table.from_bytes
        """
        loaded = srsly.msgpack_loads(bytes_data)
        data = loaded.get("dict", {})
        self.name = loaded["name"]
        # Restore the serialized Bloom filter rather than rebuilding it.
        self.bloom = BloomFilter().from_bytes(loaded["bloom"])
        self.clear()
        self.update(data)
        return self
| mit |
gsmartway/odoo | addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest: metadata read by the OpenERP/Odoo module loader.
{
    'name': 'Opportunity to Quotation',
    'version': '1.0',
    # 'Hidden' keeps the module out of the Apps listing; it is installed
    # automatically via auto_install below.
    'category': 'Hidden',
    'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sale order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    # Modules that must be installed before this one.
    'depends': ['sale', 'crm', 'web_kanban_gauge'],
    # View/security files loaded (in order) at install/update time.
    'data': [
        'wizard/crm_make_sale_view.xml',
        'sale_crm_view.xml',
        'security/sale_crm_security.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'test': ['test/sale_crm.yml'],
    'installable': True,
    # Install automatically once every module in 'depends' is installed.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yanikou19/pymatgen | pymatgen/io/feffio_set.py | 3 | 9972 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module defines the FeffInputSet abstract base class and a concrete
implementation for the Materials Project. The basic concept behind an input
set is to specify a scheme to generate a consistent set of Feff inputs from a
structure without further user intervention. This ensures comparability across
runs.
"""
import six
__author__ = "Alan Dozier"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0.3"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__date__ = "April 7, 2013"
import os
import abc
from monty.serialization import loadfn
from pymatgen.io.feffio import FeffAtoms, FeffTags, FeffPot, Header
class AbstractFeffInputSet(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base class representing a set of Feff input parameters.

    The idea is that using a FeffInputSet, a complete set of input files
    (feffPOT, feffXANES, feffEXAFS, ATOMS, feff.inp) can be generated in an
    automated fashion for any structure.
    """

    @abc.abstractmethod
    def get_feff_atoms(self, structure, central_atom):
        """
        Returns Atoms string from a structure that goes in feff.inp file.

        Args:
            structure: pymatgen structure object
            central_atom: atom symbol string for absorbing atom

        Returns:
            FeffAtoms object.
        """
        return

    @abc.abstractmethod
    def get_feff_tags(self, calc_type):
        """
        Returns standard calculation parameters for either an FEFF XANES or
        EXAFS input.

        Args:
            calc_type: At this time either 'XANES' or 'EXAFS' string is
                supported for K shell excitation. In the future this will be
                expanded to include other shells and material class
                differentiation.
        """
        return

    @abc.abstractmethod
    def get_feff_pot(self, structure, central_atom):
        """
        Returns POTENTIAL section used in feff.inp from a structure.

        Args:
            structure: pymatgen structure object
            central_atom: atom symbol string for absorbing atom
        """
        return

    @abc.abstractmethod
    def get_header(self, structure, source, comment):
        """
        Returns header to be used in feff.inp file from a pymatgen structure.

        Args:
            structure: A pymatgen structure object
            source: Source identifier used to create structure, can be defined
                however user wants to organize structures, calculations, etc.
                example would be Materials Project material ID number.
        """
        return

    def get_all_feff_input(self, structure, calc_type, source, central_atom,
                           comment=''):
        """
        Returns all input files as a dict of {filename: feffio object}

        Args:
            structure: Structure object
            calc_type: At this time either 'XANES' or 'EXAFS' string is
                supported for K shell excitation. In the future this will be
                expanded to include other shells and material class
                differentiation.
            source: Source identifier used to create structure, can be defined
                however user wants to organize structures, calculations, etc.
                example would be Materials Project material ID number.
            central_atom: Atom symbol string for absorbing atom
            comment: Comment to appear in Header.

        Returns:
            dict of objects used to create feff.inp file i.e. Header, FeffTags,
            FeffPot, FeffAtoms
        """
        feff = {"HEADER": self.get_header(structure, source, comment),
                "PARAMETERS": self.get_feff_tags(calc_type),
                "POTENTIALS": self.get_feff_pot(structure, central_atom),
                "ATOMS": self.get_feff_atoms(structure, central_atom)}
        return feff

    def _feff_input_str(self, feff):
        """Join the four feff.inp sections in their canonical order.

        Shared by write_input() and as_dict(), which previously duplicated
        this expression.
        """
        return "\n\n".join(str(feff[f]) for f in ["HEADER", "PARAMETERS",
                                                  "POTENTIALS", "ATOMS"])

    def write_input(self, structure, calc_type, source, central_atom,
                    comment='', output_dir=".", make_dir_if_not_present=True):
        """
        Writes a set of FEFF input to a directory.

        Args:
            structure: Structure object
            calc_type: At this time either 'XANES' or 'EXAFS' string is
                supported for K shell excitation. In the future this will be
                expanded to include other shells and material class
                differentiation.
            source: Source identifier used to create structure, can be defined
                however user wants to organize structures, calculations, etc.
                example would be Materials Project material ID number.
            central_atom: Atom symbol string for absorbing atom
            output_dir: Directory to output the FEFF input files
            comment: comment for Header
            make_dir_if_not_present: Set to True if you want the directory (
                and the whole path) to be created if it is not present.
        """
        if make_dir_if_not_present and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        feff = self.get_all_feff_input(structure, calc_type, source,
                                       central_atom, comment)
        feff_input = self._feff_input_str(feff)
        # One file per section (HEADER, PARAMETERS, POTENTIALS, ATOMS)...
        for k, v in six.iteritems(feff):
            with open(os.path.join(output_dir, k), "w") as f:
                f.write(str(v))
        # ...plus the combined feff.inp.  The "with" statement closes the
        # file on exit; the explicit f.close() that used to follow here was
        # redundant and has been removed.
        with open(os.path.join(output_dir, "feff.inp"), "w") as f:
            f.write(feff_input)

    def as_dict(self, structure, calc_type, source, central_atom,
                comment=''):
        """Creates a feff.inp dictionary as a string"""
        feff = self.get_all_feff_input(structure, calc_type, source,
                                       central_atom, comment)
        return {'@module': self.__class__.__module__,
                '@class': self.__class__.__name__,
                'feff.inp': self._feff_input_str(feff)}

    @staticmethod
    def from_dict(d):
        """Return feff.inp from a dictionary string representation"""
        return d['feff.inp']
class FeffInputSet(AbstractFeffInputSet):
    """
    Standard implementation of FeffInputSet, which can be extended by specific
    implementations.

    Args:
        name: The name of a grouping of input parameter sets such as
            "MaterialsProject".
    """

    def __init__(self, name):
        self.name = name
        # XANES/EXAFS parameter groups are read from the bundled YAML file,
        # keyed by "<name>feffXANES" / "<name>feffEXAFS".
        module_dir = os.path.dirname(os.path.abspath(__file__))
        config = loadfn(os.path.join(module_dir, "FeffInputSets.yaml"))
        self.xanes_settings = config[self.name + "feffXANES"]
        self.exafs_settings = config[self.name + "feffEXAFS"]

    def get_header(self, structure, source='', comment=''):
        """
        Creates header string from structure object

        Args:
            structure: A pymatgen structure object
            source: Source identifier used to create structure, can be defined
                however user wants to organize structures, calculations, etc.
                example would be Materials Project material ID number.
            comment: comment to include in header

        Returns:
            Header object to be used in feff.inp file from a pymatgen structure
        """
        return Header(structure, source, comment)

    def get_feff_tags(self, calc_type):
        """
        Reads standard parameters for XANES or EXAFS calculation
        from FeffInputSets.yaml file.

        Args:
            calc_type: At this time either 'XANES' or 'EXAFS' string is
                supported for K shell excitation. In the future this will be
                expanded to include other shells and material class
                differentiation.

        Returns:
            FeffTags object
        """
        if calc_type.upper() == "XANES":
            fefftags = FeffTags(self.xanes_settings)
        elif calc_type.upper() == "EXAFS":
            fefftags = FeffTags(self.exafs_settings)
        else:
            raise ValueError("{} is not a valid calculation type"
                             .format(calc_type))
        return fefftags

    def get_feff_pot(self, structure, central_atom):
        """
        Creates string representation of potentials used in POTENTIAL file and
        feff.inp.

        Args:
            structure: pymatgen structure object
            central_atom: atom symbol string for absorbing atom

        Returns:
            FeffPot object
        """
        return FeffPot(structure, central_atom)

    def get_feff_atoms(self, structure, central_atom):
        """
        Creates string representation of atomic shell coordinates using in
        ATOMS file and feff.inp.

        Args:
            structure: pymatgen structure object
            central_atom: atom symbol string for absorbing atom

        Returns:
            FeffAtoms object
        """
        return FeffAtoms(structure, central_atom)

    def __str__(self):
        output = [self.name]
        # BUGFIX: pair each section name with its own settings dict.  The
        # previous nested loop appended *both* settings dicts under *each*
        # of the two section headers, duplicating every parameter in the
        # printed summary.
        for ns, d in zip(["XANES", "EXAFS"],
                         [self.xanes_settings, self.exafs_settings]):
            output.append(ns)
            for k, v in six.iteritems(d):
                output.append("%s = %s" % (k, str(v)))
            output.append("")
        return "\n".join(output)
class MaterialsProjectFeffInputSet(FeffInputSet):
    """
    Implementation of FeffInputSet utilizing parameters in the public
    Materials Project.
    """

    def __init__(self):
        # Delegates to FeffInputSet.__init__, which loads the
        # "MaterialsProject" XANES/EXAFS parameter groups from
        # FeffInputSets.yaml.
        super(MaterialsProjectFeffInputSet, self).__init__("MaterialsProject")
| mit |
remybaranx/qtaste | doc/src/docbkx/scripts/lib/PyGithub/github/StatsParticipation.py | 74 | 2654 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsParticipation(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents statistics of participation. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else
    """

    # Both exposed attributes are weekly commit counts parsed from the API
    # payload keys of the same names.

    @property
    def all(self):
        """
        :type: list of int
        """
        return self._all.value

    @property
    def owner(self):
        """
        :type: list of int
        """
        return self._owner.value

    def _initAttributes(self):
        # Start every slot at NotSet; _useAttributes fills in whatever the
        # API response actually contained.
        for slot in ("_all", "_owner"):
            setattr(self, slot, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Each payload key maps onto the attribute slot "_<key>".
        for key in ("all", "owner"):
            if key in attributes:  # pragma no branch
                value = self._makeListOfIntsAttribute(attributes[key])
                setattr(self, "_" + key, value)
| gpl-3.0 |
SiCKRAGETV/SickRage | sickrage/core/media/__init__.py | 2 | 2512 | from __future__ import unicode_literals
import io
import os
from mimetypes import guess_type
from tornado.escape import url_escape
import sickrage
from sickrage.core.exceptions import MultipleShowObjectsException
from sickrage.core.helpers import findCertainShow
class Media(object):
    """Represents a media file (e.g. a show image) identified by an indexer id."""

    def __init__(self, indexer_id, media_format=None):
        """
        :param indexer_id: The indexer id of the show
        :param media_format: The media format of the show image
        """
        # An empty/missing format falls back to 'normal'.
        self.media_format = media_format or 'normal'

        # Non-numeric ids are coerced to 0 rather than raising.
        try:
            self.indexer_id = int(indexer_id)
        except ValueError:
            self.indexer_id = 0

    def get_default_media_name(self):
        """
        :return: The name of the file to use as a fallback if the show media file is missing
        """
        return ''

    @property
    def url(self):
        """
        :return: The url to the desired media file
        """
        # Strip the cache and static-dir prefixes so the remainder is
        # relative to the web root, then normalise separators and escape.
        relative = self.get_static_media_path()
        for prefix in (sickrage.app.cache_dir, sickrage.app.config.gui_static_dir):
            relative = relative.replace(prefix, "")
        return url_escape(relative.replace('\\', '/'), False)

    @property
    def content(self):
        """
        :return: The content of the desired media file
        """
        full_path = os.path.abspath(self.get_static_media_path()).replace('\\', '/')
        with io.open(full_path, 'rb') as media:
            return media.read()

    @property
    def type(self):
        """
        :return: The mime type of the current media, or '' if the file is missing
        """
        static_media_path = self.get_static_media_path()
        if not os.path.isfile(static_media_path):
            return ''
        return guess_type(static_media_path)[0]

    def get_media_path(self):
        """
        :return: The path to the media related to ``self.indexer_id``
        """
        return ''

    @staticmethod
    def get_media_root():
        """
        :return: The root folder containing the media
        """
        return os.path.join(sickrage.app.config.gui_static_dir)

    def get_show(self):
        """
        :return: The show object associated with ``self.indexer_id`` or ``None``
        """
        try:
            return findCertainShow(self.indexer_id)
        except MultipleShowObjectsException:
            return None

    def get_static_media_path(self):
        """
        :return: The full (normalized) path to the media
        """
        return os.path.normpath(self.get_media_path())
| gpl-3.0 |
epfl-cosmo/lammps | tools/moltemplate/moltemplate/lttree_styles.py | 8 | 10397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
try:
from .ttree_lex import InputError
except (SystemError, ValueError):
# not installed as a package
from ttree_lex import InputError
# Users of lttree typically generate the following files:
# The variable below refer to file names generated by
# write() and write_once() commands in a lttree-file.
# (I keep changing my mind what I want these names to be.)
# File names generated by write()/write_once() commands in an LTTREE file.
# Each "Data ..." file is a fragment of a LAMMPS data file (see the LAMMPS
# "read_data" command); they are concatenated into the final data file.
data_prefix = "Data "
data_prefix_no_space = "Data"
data_atoms = "Data Atoms"
data_masses = "Data Masses"
data_velocities = "Data Velocities"
data_bonds = "Data Bonds"
data_bond_list = "Data Bond List"
data_angles = "Data Angles"
data_dihedrals = "Data Dihedrals"
data_impropers = "Data Impropers"
data_bond_coeffs = "Data Bond Coeffs"
data_angle_coeffs = "Data Angle Coeffs"
data_dihedral_coeffs = "Data Dihedral Coeffs"
data_improper_coeffs = "Data Improper Coeffs"
data_pair_coeffs = "Data Pair Coeffs"
# interactions-by-type (not id. This is not part of the LAMMPS standard.)
data_bonds_by_type = "Data Bonds By Type"
data_angles_by_type = "Data Angles By Type"
data_dihedrals_by_type = "Data Dihedrals By Type"
data_impropers_by_type = "Data Impropers By Type"
# class2 data sections
data_bondbond_coeffs = "Data BondBond Coeffs"
data_bondangle_coeffs = "Data BondAngle Coeffs"
data_middlebondtorsion_coeffs = "Data MiddleBondTorsion Coeffs"
data_endbondtorsion_coeffs = "Data EndBondTorsion Coeffs"
data_angletorsion_coeffs = "Data AngleTorsion Coeffs"
data_angleangletorsion_coeffs = "Data AngleAngleTorsion Coeffs"
data_bondbond13_coeffs = "Data BondBond13 Coeffs"
data_angleangle_coeffs = "Data AngleAngle Coeffs"
# sections for non-point-like particles:
data_ellipsoids = "Data Ellipsoids"
data_lines = "Data Lines"
data_triangles = "Data Triangles"
# periodic boundary conditions
data_boundary = "Data Boundary"
# (for backward compatibility), an older version of this file was named:
data_pbc = "Data PBC"
# In addition, moltemplate may also generate the following files, which
# represent different sections of the LAMMPS input script:
in_prefix = "In "
in_prefix_no_space = "In"
in_init = "In Init"
in_settings = "In Settings"
in_coords = "In Coords"
# Atom Styles in LAMMPS as of 2011-7-29.
# Maps each atom_style name to the ordered list of column names appearing
# in the corresponding "Atoms" section of a LAMMPS data file.
g_style_map = {'angle': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'atomic': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               'body': ['atom-ID', 'atom-type', 'bodyflag', 'mass', 'x', 'y', 'z'],
               'bond': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'charge': ['atom-ID', 'atom-type', 'q', 'x', 'y', 'z'],
               'dipole': ['atom-ID', 'atom-type', 'q', 'x', 'y', 'z', 'mux', 'muy', 'muz'],
               'dpd': ['atom-ID', 'atom-type', 'theta', 'x', 'y', 'z'],
               'electron': ['atom-ID', 'atom-type', 'q', 'spin', 'eradius', 'x', 'y', 'z'],
               'ellipsoid': ['atom-ID', 'atom-type', 'x', 'y', 'z', 'quatw', 'quati', 'quatj', 'quatk'],
               'full': ['atom-ID', 'molecule-ID', 'atom-type', 'q', 'x', 'y', 'z'],
               'line': ['atom-ID', 'molecule-ID', 'atom-type', 'lineflag', 'density', 'x', 'y', 'z'],
               'meso': ['atom-ID', 'atom-type', 'rho', 'e', 'cv', 'x', 'y', 'z'],
               'molecular': ['atom-ID', 'molecule-ID', 'atom-type', 'x', 'y', 'z'],
               'peri': ['atom-ID', 'atom-type', 'volume', 'density', 'x', 'y', 'z'],
               # BUGFIX: a missing comma after 'molecule-ID' caused implicit
               # string concatenation ('molecule-IDvolume'), producing a bogus
               # column name and dropping a column from the 'smd' style.
               'smd': ['atom-ID', 'atom-type', 'molecule-ID', 'volume', 'mass', 'kernel-radius', 'contact-radius', 'x', 'y', 'z'],
               'sphere': ['atom-ID', 'atom-type', 'diameter', 'density', 'x', 'y', 'z'],
               'template': ['atom-ID', 'molecule-ID', 'template-index', 'template-atom', 'atom-type', 'x', 'y', 'z'],
               'tri': ['atom-ID', 'molecule-ID', 'atom-type', 'triangleflag', 'density', 'x', 'y', 'z'],
               'wavepacket': ['atom-ID', 'atom-type', 'charge', 'spin', 'eradius', 'etag', 'cs_re', 'cs_im', 'x', 'y', 'z'],
               'hybrid': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               # The following styles were removed from LAMMPS as of 2012-3
               'colloid': ['atom-ID', 'atom-type', 'x', 'y', 'z'],
               'granular': ['atom-ID', 'atom-type', 'diameter', 'density', 'x', 'y', 'z']}


def AtomStyle2ColNames(atom_style_string):
    """Return the list of Atoms-section column names for an atom_style.

    atom_style_string may be:
      * a known style name (e.g. "full"),
      * "hybrid" followed by sub-style names (their columns are merged in
        first-seen order), or
      * an explicit whitespace-separated list of two or more custom column
        names, which is returned verbatim.
    Raises InputError for empty or unrecognized styles.
    """
    atom_style_string = atom_style_string.strip()
    if len(atom_style_string) == 0:
        raise InputError('Error: Invalid atom_style\n'
                         ' (The atom_style command was followed by an empty string.)\n')
    atom_style_args = atom_style_string.split()
    atom_style = atom_style_args[0]
    hybrid_args = atom_style_args[1:]
    if (atom_style not in g_style_map):
        if (len(atom_style_args) >= 2):
            # If the atom_style_string includes at least 2 words, then we
            # interpret this as a list of the individual column names
            return atom_style_args
        else:
            raise InputError(
                'Error: Unrecognized atom_style: \"' + atom_style + '\"\n')
    if (atom_style != 'hybrid'):
        return g_style_map[atom_style]
    else:
        # Hybrid styles always carry the base columns; each sub-style then
        # contributes any columns not already present, preserving order.
        column_names = ['atom-ID', 'atom-type', 'x', 'y', 'z']
        if (len(hybrid_args) == 0):
            raise InputError(
                'Error: atom_style hybrid must be followed by a sub_style.\n')
        for sub_style in hybrid_args:
            if (sub_style not in g_style_map):
                raise InputError(
                    'Error: Unrecognized atom_style: \"' + sub_style + '\"\n')
            for cname in g_style_map[sub_style]:
                if cname not in column_names:
                    column_names.append(cname)
        return column_names
def ColNames2AidAtypeMolid(column_names):
    """Locate the atom-ID, atom-type, and molecule-ID columns.

    Returns the triple (i_atomid, i_atomtype, i_molid); i_molid is None
    when the style has no molecule-ID column.  Because of the diversity of
    ways these quantities are referred to in the LAMMPS documentation, a
    number of alias spellings are accepted for each column.
    """

    def find_first(aliases):
        # Index of the first alias present in column_names, else None.
        for alias in aliases:
            if alias in column_names:
                return column_names.index(alias)
        return None

    # (the second spelling of each alias uses the Unicode minus sign that
    # appears in the LAMMPS manual)
    i_atomid = find_first(('atom-ID', 'atom−ID', 'atomID', 'atomid', 'id',
                           'atom', '$atom'))
    if i_atomid is None:
        raise InputError('Error: List of column names lacks an \"atom-ID\"\n')

    i_atomtype = find_first(('atom-type', 'atom−type', 'atomtype', 'type',
                             '@atom'))
    if i_atomtype is None:
        raise InputError(
            'Error: List of column names lacks an \"atom-type\"\n')

    # some atom_types do not have a valid molecule-ID, so no alias match is
    # not an error here
    i_molid = find_first(('molecule-ID', 'molecule−ID', 'moleculeID',
                          'moleculeid', 'molecule', 'molID', 'molid', 'mol',
                          '$mol'))
    return i_atomid, i_atomtype, i_molid
def ColNames2Coords(column_names):
    """ Which of the columns correspond to coordinates
    which must be transformed using rigid-body
    (affine: rotation + translation) transformations?
    This function outputs a list of lists of triplets of integers
    (the indices of the 'x', 'y', 'z' columns).
    """
    indices = [column_names.index(axis) if axis in column_names else None
               for axis in ('x', 'y', 'z')]
    defined = [i is not None for i in indices]
    # Either all three coordinates are present or none of them may be.
    if any(defined) and not all(defined):
        raise InputError(
            'Error: custom atom_style list must define x, y, and z.\n')
    i_x, i_y, i_z = indices
    return [[i_x, i_y, i_z]]
def ColNames2Vects(column_names):
    """ Which of the columns correspond to coordinates
    which must be transformed using rotations?
    Some coordinates like dipole moments and
    ellipsoid orientations should only be rotated
    (not translated).
    This function outputs a list of lists of triplets of integers.
    """
    vects = []
    i_mux = None
    i_muy = None
    i_muz = None
    if 'mux' in column_names:
        i_mux = column_names.index('mux')
    if 'muy' in column_names:
        i_muy = column_names.index('muy')
    if 'muz' in column_names:
        i_muz = column_names.index('muz')
    # The dipole components must be supplied all together or not at all.
    if (((i_mux != None) != (i_muy != None)) or
        ((i_muy != None) != (i_muz != None)) or
        ((i_muz != None) != (i_mux != None))):
        raise InputError(
            'Error: custom atom_style list must define mux, muy, and muz or none.\n')
    if i_mux != None:
        vects.append([i_mux, i_muy, i_muz])
    # BUGFIX: the function previously fell off the end and implicitly
    # returned None; return the accumulated list of rotate-only column
    # triplets as the docstring promises.
    return vects
| gpl-2.0 |
Sutto/cloud-custodian | tools/c7n_mailer/c7n_mailer/ldap_lookup.py | 5 | 9070 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import redis
try:
import sqlite3
except ImportError:
have_sqlite = False
else:
have_sqlite = True
from ldap3 import Connection
from ldap3.core.exceptions import LDAPSocketOpenError
class LdapLookup(object):
    """Resolve user metadata (email, display name, manager) from LDAP.

    Results are optionally cached in redis or a local sqlite file so
    repeated lookups for the same uid/dn do not hit the directory server.
    """

    def __init__(self, config, logger):
        self.log = logger
        self.connection = self.get_connection(
            config.get('ldap_uri'),
            config.get('ldap_bind_user', None),
            config.get('ldap_bind_password', None)
        )
        self.base_dn = config.get('ldap_bind_dn')
        self.email_key = config.get('ldap_email_key', 'mail')
        self.manager_attr = config.get('ldap_manager_attribute', 'manager')
        self.uid_key = config.get('ldap_uid_attribute', 'sAMAccountName')
        self.attributes = ['displayName', self.uid_key, self.email_key, self.manager_attr]
        self.uid_regex = config.get('ldap_uid_regex', None)
        self.cache_engine = config.get('cache_engine', None)
        if self.cache_engine == 'redis':
            redis_host = config.get('redis_host')
            redis_port = int(config.get('redis_port', 6379))
            self.caching = self.get_redis_connection(redis_host, redis_port)
        elif self.cache_engine == 'sqlite':
            if not have_sqlite:
                raise RuntimeError('No sqlite available: stackoverflow.com/q/44058239')
            self.caching = LocalSqlite(config.get('ldap_cache_file', '/var/tmp/ldap.cache'), logger)

    def get_redis_connection(self, redis_host, redis_port):
        """Return a redis-backed cache wrapper."""
        return Redis(redis_host=redis_host, redis_port=redis_port, db=0)

    def get_connection(self, ldap_uri, ldap_bind_user, ldap_bind_password):
        """Open an LDAP connection, or return None if the socket can't open.

        Note: if ldap_bind_user and ldap_bind_password are None an
        anonymous bind will be attempted.
        """
        try:
            return Connection(
                ldap_uri, user=ldap_bind_user, password=ldap_bind_password,
                auto_bind=True,
                receive_timeout=30,
                auto_referrals=False,
            )
        except LDAPSocketOpenError:
            self.log.error('Not able to establish a connection with LDAP.')

    def search_ldap(self, base_dn, ldap_filter, attributes):
        """Search LDAP expecting exactly one entry.

        Returns the single matching entry, or {} when zero or more than
        one entry matches.
        """
        # Bug fix: honor the attributes argument instead of always reading
        # self.attributes (all internal callers pass self.attributes, so
        # behavior is unchanged for them).
        self.connection.search(base_dn, ldap_filter, attributes=attributes)
        if len(self.connection.entries) == 0:
            self.log.warning("user not found. base_dn: %s filter: %s", base_dn, ldap_filter)
            return {}
        if len(self.connection.entries) > 1:
            self.log.warning("too many results for search %s", ldap_filter)
            return {}
        return self.connection.entries[0]

    def get_email_to_addrs_from_uid(self, uid, manager=False):
        """Return the list of email addresses for uid (plus manager's if asked)."""
        to_addrs = []
        uid_metadata = self.get_metadata_from_uid(uid)
        uid_email = uid_metadata.get(self.email_key, None)
        if uid_email:
            to_addrs.append(uid_email)
        if manager:
            uid_manager_dn = uid_metadata.get(self.manager_attr, None)
            uid_manager_email = None
            if uid_manager_dn:
                uid_manager = self.get_metadata_from_dn(uid_manager_dn)
                uid_manager_email = uid_manager.get('mail')
            if uid_manager_email:
                to_addrs.append(uid_manager_email)
        return to_addrs

    # eg, dn = uid=bill_lumbergh,cn=users,dc=initech,dc=com
    def get_metadata_from_dn(self, user_dn):
        """Resolve a user's metadata dict from a full DN, consulting the cache."""
        if self.cache_engine:
            cache_result = self.caching.get(user_dn)
            if cache_result:
                cache_msg = 'Got ldap metadata from local cache for: %s' % user_dn
                self.log.debug(cache_msg)
                return cache_result
        ldap_filter = '(%s=*)' % self.uid_key
        ldap_results = self.search_ldap(user_dn, ldap_filter, attributes=self.attributes)
        if ldap_results:
            ldap_user_metadata = self.get_dict_from_ldap_object(self.connection.entries[0])
        else:
            # Bug fix: only touch the cache when one is configured; this
            # previously raised AttributeError when cache_engine was None.
            if self.cache_engine:
                self.caching.set(user_dn, {})
            return {}
        if self.cache_engine:
            self.log.debug('Writing user: %s metadata to cache engine.' % user_dn)
            self.caching.set(user_dn, ldap_user_metadata)
            self.caching.set(ldap_user_metadata[self.uid_key], ldap_user_metadata)
        return ldap_user_metadata

    def get_dict_from_ldap_object(self, ldap_user_object):
        """Flatten an ldap3 entry into a plain dict keyed by attribute name.

        Returns {} when the entry lacks the configured email or uid
        attribute. Email and uid are normalized to lower case so they line
        up with the lower-cased uid cache keys used in get_metadata_from_uid.
        """
        ldap_user_metadata = {attr.key: attr.value for attr in ldap_user_object}
        ldap_user_metadata['dn'] = ldap_user_object.entry_dn
        email = ldap_user_metadata.get(self.email_key, None)
        uid = ldap_user_metadata.get(self.uid_key, None)
        if not email or not uid:
            return {}
        # Bug fix: these were previously stored under the literal string
        # keys 'self.email_key'/'self.uid_key', so the normalized values
        # were never visible under the configured attribute names and the
        # cache was keyed with a non-lowercased uid.
        ldap_user_metadata[self.email_key] = email.lower()
        ldap_user_metadata[self.uid_key] = uid.lower()
        return ldap_user_metadata

    # eg, uid = bill_lumbergh
    def get_metadata_from_uid(self, uid):
        """Resolve a user's metadata dict from a bare uid, consulting the cache."""
        uid = uid.lower()
        if self.uid_regex:
            # for example if you set ldap_uid_regex in your mailer.yml to "^[0-9]{6}$" then it
            # would only query LDAP if your string length is 6 characters long and only digits.
            # re.search("^[0-9]{6}$", "123456")
            # Out[41]: <_sre.SRE_Match at 0x1109ab440>
            # re.search("^[0-9]{6}$", "1234567") returns None, or "12345a' also returns None
            if not re.search(self.uid_regex, uid):
                regex_msg = 'uid does not match regex: %s %s' % (self.uid_regex, uid)
                self.log.debug(regex_msg)
                return {}
        if self.cache_engine:
            cache_result = self.caching.get(uid)
            # {} is a valid cached value meaning "known not to exist in LDAP".
            if cache_result or cache_result == {}:
                cache_msg = 'Got ldap metadata from local cache for: %s' % uid
                self.log.debug(cache_msg)
                return cache_result
        ldap_filter = '(%s=%s)' % (self.uid_key, uid)
        ldap_results = self.search_ldap(self.base_dn, ldap_filter, attributes=self.attributes)
        if ldap_results:
            ldap_user_metadata = self.get_dict_from_ldap_object(self.connection.entries[0])
            if self.cache_engine:
                self.log.debug('Writing user: %s metadata to cache engine.' % uid)
                if ldap_user_metadata.get('dn'):
                    self.caching.set(ldap_user_metadata['dn'], ldap_user_metadata)
                    self.caching.set(uid, ldap_user_metadata)
                else:
                    self.caching.set(uid, {})
        else:
            if self.cache_engine:
                self.caching.set(uid, {})
            return {}
        return ldap_user_metadata
# Use sqlite as a local cache for folks not running the mailer in lambda, avoids extra daemons
# as dependencies. This normalizes the methods to set/get functions, so you can interchangeable
# decide which caching system to use, a local file, or memcache, redis, etc
# If you don't want a redis dependency and aren't running the mailer in lambda this works well
class LocalSqlite(object):
    """sqlite-backed key/value cache for LDAP lookups.

    Values are stored as JSON strings so arbitrary dicts round-trip through
    the text column.
    """

    def __init__(self, local_filename, logger):
        self.log = logger
        self.sqlite = sqlite3.connect(local_filename)
        self.sqlite.execute('''CREATE TABLE IF NOT EXISTS ldap_cache(key text, value text)''')

    def get(self, key):
        """Return the cached value for key, or None on a miss."""
        sqlite_result = self.sqlite.execute("select * FROM ldap_cache WHERE key=?", (key,))
        result = sqlite_result.fetchall()
        if len(result) != 1:
            error_msg = 'Did not get 1 result from sqlite, something went wrong with key: %s' % key
            self.log.error(error_msg)
            return None
        return json.loads(result[0][1])

    def set(self, key, value):
        # note, the ? marks are required to ensure escaping into the database.
        # Bug fix: remove any existing row first, otherwise repeated set()
        # calls accumulate duplicate rows and get() starts returning None
        # (its len(result) != 1 guard fires).
        self.sqlite.execute("DELETE FROM ldap_cache WHERE key=?", (key,))
        self.sqlite.execute("INSERT INTO ldap_cache VALUES (?, ?)", (key, json.dumps(value)))
        self.sqlite.commit()
# redis can't write complex python objects like dictionaries as values (the way memcache can)
# so we turn our dict into a json string when setting, and json.loads when getting
class Redis(object):
    """Thin redis wrapper that JSON-encodes values.

    redis can't store complex python objects like dictionaries as values
    (the way memcache can), so values are serialized to JSON on set() and
    deserialized on get().
    """

    def __init__(self, redis_host=None, redis_port=6379, db=0):
        self.connection = redis.StrictRedis(host=redis_host, port=redis_port, db=db)

    def get(self, key):
        """Return the decoded value for key, or None on a miss."""
        raw = self.connection.get(key)
        return json.loads(raw) if raw else None

    def set(self, key, value):
        """Store value (JSON-encoded) under key."""
        return self.connection.set(key, json.dumps(value))
| apache-2.0 |
40223110/2015cd_midterm- | static/Brython3.1.1-20150328-091302/Lib/unittest/loader.py | 739 | 13883 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import functools
from fnmatch import fnmatch
from . import case, suite, util
# Marks frames from this module so unittest prunes them from tracebacks.
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
# Matches file names that are valid importable test modules (identifier + .py).
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Wrap the active import failure (with traceback) in a synthetic test."""
    msg = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc())
    return _make_failed_test(
        'ModuleImportFailure', name, ImportError(msg), suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    """Wrap an exception raised by load_tests() in a synthetic test."""
    return _make_failed_test(
        'LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Build a one-test suite whose single test method re-raises exception."""
    def testFailure(self):
        raise exception
    # Synthesize a TestCase subclass on the fly with the failing method.
    TestClass = type(classname, (case.TestCase,), {methodname: testFailure})
    return suiteClass((TestClass(methodname),))
def _jython_aware_splitext(path):
    """Strip the extension, treating Jython's '$py.class' as a single unit."""
    suffix = '$py.class'
    if path.lower().endswith(suffix):
        return path[:-len(suffix)]
    return os.path.splitext(path)[0]
class TestLoader(object):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Methods whose names start with this prefix are collected as tests.
    testMethodPrefix = 'test'
    # Comparison function used to order test method names.
    sortTestMethodsUsing = staticmethod(util.three_way_cmp)
    # Callable used to wrap collected tests; must accept an iterable of tests.
    suiteClass = suite.TestSuite
    # Remembered by discover() so nested load_tests calls can omit the argument.
    _top_level_dir = None
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                            " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        # Fall back to the conventional 'runTest' method when no test_* found.
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite
    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, case.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        # A module-level load_tests(loader, tests, pattern) hook, if present,
        # takes over responsibility for assembling the suite.
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, None)
            except Exception as e:
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Import the longest importable dotted prefix of the name;
            # the remaining parts are resolved as attributes below.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, case.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.FunctionType) and
              isinstance(parent, type) and
              issubclass(parent, case.TestCase)):
            name = parts[-1]
            inst = parent(name)
            # static methods follow a different path
            if not isinstance(getattr(inst, name), types.FunctionType):
                return self.suiteClass([inst])
        elif isinstance(obj, suite.TestSuite):
            return obj
        if callable(obj):
            test = obj()
            if isinstance(test, suite.TestSuite):
                return test
            elif isinstance(test, case.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            return attrname.startswith(prefix) and \
                callable(getattr(testCaseClass, attrname))
        testFnNames = list(filter(isTestMethod, dir(testCaseClass)))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=functools.cmp_to_key(self.sortTestMethodsUsing))
        return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them and return all
        tests found within them. Only test files that match the pattern will
        be loaded. (Using shell style pattern matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    # Derive the real top level from the imported package and
                    # drop the provisional sys.path entry added above.
                    self._top_level_dir = self._get_directory_containing_module(top_part)
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
    def _get_directory_containing_module(self, module_name):
        # Return the directory a package/module was imported from.
        module = sys.modules[module_name]
        full_path = os.path.abspath(module.__file__)
        if os.path.basename(full_path).lower().startswith('__init__.py'):
            return os.path.dirname(os.path.dirname(full_path))
        else:
            # here we have been given a module rather than a package - so
            # all we can do is search the *same* directory the module is in
            # should an exception be raised instead
            return os.path.dirname(full_path)
    def _get_name_from_path(self, path):
        # Convert a filesystem path to a dotted module name relative to
        # the remembered top level directory.
        path = _jython_aware_splitext(os.path.normpath(path))
        _relpath = os.path.relpath(path, self._top_level_dir)
        assert not os.path.isabs(_relpath), "Path must be within the project"
        assert not _relpath.startswith('..'), "Path must be within the project"
        name = _relpath.replace(os.path.sep, '.')
        return name
    def _get_module_from_name(self, name):
        __import__(name)
        return sys.modules[name]
    def _match_path(self, path, full_path, pattern):
        # override this method to use alternative matching strategy
        return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = _jython_aware_splitext(os.path.realpath(mod_file))
                    fullpath_noext = _jython_aware_splitext(os.path.realpath(full_path))
                    # Detect the case where the imported module came from a
                    # different location than the file we found on disk.
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = _jython_aware_splitext(os.path.basename(full_path))
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception as e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
defaultTestLoader = TestLoader()  # shared module-level loader instance
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Build a customized TestLoader for the legacy helper functions below."""
    loader = TestLoader()
    loader.testMethodPrefix = prefix
    loader.sortTestMethodsUsing = sortUsing
    if suiteClass:
        loader.suiteClass = suiteClass
    return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=util.three_way_cmp):
    """Legacy helper: sorted test method names of testCaseClass."""
    loader = _makeLoader(prefix, sortUsing)
    return loader.getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=util.three_way_cmp,
              suiteClass=suite.TestSuite):
    """Legacy helper: a suite of all tests contained in testCaseClass."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=util.three_way_cmp,
                  suiteClass=suite.TestSuite):
    """Legacy helper: a suite of all tests found in module."""
    loader = _makeLoader(prefix, sortUsing, suiteClass)
    return loader.loadTestsFromModule(module)
| gpl-3.0 |
coursemdetw/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/test/support.py | 111 | 67787 | """Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import collections.abc
import re
import subprocess
import imp
import time
import sysconfig
import fnmatch
import logging.handlers
import struct
import tempfile
import _testcapi
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
__all__ = [
"Error", "TestFailed", "ResourceDenied", "import_module", "verbose",
"use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "find_unused_port",
"bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD",
"temp_cwd", "findfile", "create_empty_file", "sortdict",
"check_syntax_error", "open_urlresource", "check_warnings", "CleanImport",
"EnvironmentVarGuard", "TransientResource", "captured_stdout",
"captured_stdin", "captured_stderr", "time_out", "socket_peer_reset",
"ioerror_peer_reset", "run_with_locale", 'temp_umask',
"transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only", "check_impl_detail",
"get_attribute", "swap_item", "swap_attr", "requires_IEEE_754",
"TestHandler", "Matcher", "can_symlink", "skip_unless_symlink",
"skip_unless_xattr", "import_fresh_module", "requires_zlib",
"PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz",
"requires_bz2", "requires_lzma", "suppress_crash_popup",
]
class Error(Exception):
    """Root of the exception hierarchy used by the regression tests."""
class TestFailed(Error):
    """Raised when a regression test fails."""
class ResourceDenied(unittest.SkipTest):
    """Test skipped because it requested a disallowed resource.

    Raised when a test calls requires() for a resource that has not been
    enabled; distinguishes expected skips from unexpected ones.
    """
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
    """Context manager suppressing module/package DeprecationWarnings on import.

    If ignore is False, this context manager has no effect.
    """
    if not ignore:
        yield
        return
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", ".+ (module|package)",
                                DeprecationWarning)
        yield
def import_module(name, deprecated=False):
    """Import and return the module under test, raising SkipTest if missing.

    If deprecated is True, any module or package deprecation messages will
    be suppressed.
    """
    with _ignore_deprecated_imports(deprecated):
        try:
            return importlib.import_module(name)
        except ImportError as msg:
            raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
    """Stash *name* and its submodules from sys.modules into orig_modules.

    Raise ImportError if the module can't be imported.
    """
    # Verify the module is importable before removing it from the cache.
    if name not in sys.modules:
        __import__(name)
        del sys.modules[name]
    submodule_prefix = name + '.'
    for modname in list(sys.modules):
        if modname == name or modname.startswith(submodule_prefix):
            orig_modules[modname] = sys.modules[modname]
            del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
    """Save sys.modules[name] into orig_modules, then block it with None.

    Return True if the module was in sys.modules, False otherwise.
    """
    saved = name in sys.modules
    if saved:
        orig_modules[name] = sys.modules[name]
    sys.modules[name] = None
    return saved
def anticipate_failure(condition):
    """Decorator marking a test as a known failure when condition is true.

    Any use of this decorator should have a comment identifying the
    associated tracker issue.
    """
    return unittest.expectedFailure if condition else (lambda f: f)
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
    """Imports and returns a module, deliberately bypassing the sys.modules cache
    and importing a fresh copy of the module. Once the import is complete,
    the sys.modules cache is restored to its original state.
    Modules named in fresh are also imported anew if needed by the import.
    If one of these modules can't be imported, None is returned.
    Importing of modules named in blocked is prevented while the fresh import
    takes place.
    If deprecated is True, any module or package deprecation messages
    will be suppressed."""
    # NOTE: test_heapq, test_json and test_warnings include extra sanity checks
    # to make sure that this utility function is working as expected
    with _ignore_deprecated_imports(deprecated):
        # Keep track of modules saved for later restoration as well
        # as those which just need a blocking entry removed
        orig_modules = {}
        names_to_remove = []
        _save_and_remove_module(name, orig_modules)
        try:
            for fresh_name in fresh:
                _save_and_remove_module(fresh_name, orig_modules)
            for blocked_name in blocked:
                # Only remember blocked names that were NOT previously present;
                # the present ones are restored via orig_modules instead.
                if not _save_and_block_module(blocked_name, orig_modules):
                    names_to_remove.append(blocked_name)
            fresh_module = importlib.import_module(name)
        except ImportError:
            fresh_module = None
        finally:
            # Restore the original sys.modules state before returning,
            # whether or not the fresh import succeeded.
            for orig_name, module in orig_modules.items():
                sys.modules[orig_name] = module
            for name_to_remove in names_to_remove:
                del sys.modules[name_to_remove]
        return fresh_module
def get_attribute(obj, name):
    """Return getattr(obj, name), raising SkipTest when the attribute is missing."""
    try:
        return getattr(obj, name)
    except AttributeError:
        raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
# Runtime knobs configured by regrtest.py before the tests execute.
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
 # small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    """Remember the stdout object that was active when regrtest started."""
    global _original_stdout
    _original_stdout = stdout
def get_original_stdout():
    """Return the remembered stdout, falling back to the current sys.stdout."""
    return _original_stdout if _original_stdout else sys.stdout
def unload(name):
    """Drop name from sys.modules if present; silently do nothing otherwise."""
    sys.modules.pop(name, None)
# Filesystem mutators: on Windows, deletions are asynchronous (files can be
# "pending deletion"), so wrap each operation in a retry/wait loop; on other
# platforms use the os/shutil primitives directly.
if sys.platform.startswith("win"):
    def _waitfor(func, pathname, waitall=False):
        # Perform the operation
        func(pathname)
        # Now setup the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or '.'
        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on a i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:
            # Note we are only testing for the existance of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights. If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):
                return
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2
        warnings.warn('tests may fail, delete still pending for ' + pathname,
                      RuntimeWarning, stacklevel=4)
    def _unlink(filename):
        _waitfor(os.unlink, filename)
    def _rmdir(dirname):
        _waitfor(os.rmdir, dirname)
    def _rmtree(path):
        # Depth-first removal: wait for each subtree's entries to disappear
        # before removing the containing directory.
        def _rmtree_inner(path):
            for name in os.listdir(path):
                fullname = os.path.join(path, name)
                if os.path.isdir(fullname):
                    _waitfor(_rmtree_inner, fullname, waitall=True)
                    os.rmdir(fullname)
                else:
                    os.unlink(fullname)
        _waitfor(_rmtree_inner, path, waitall=True)
        _waitfor(os.rmdir, path)
else:
    _unlink = os.unlink
    _rmdir = os.rmdir
    _rmtree = shutil.rmtree
def unlink(filename):
    """Remove filename, tolerating a file (or path component) that's absent."""
    try:
        _unlink(filename)
    except OSError as error:
        # ENOENT/ENOTDIR: the filename (or part of its path) need not exist.
        if error.errno != errno.ENOENT and error.errno != errno.ENOTDIR:
            raise
def rmdir(dirname):
    """Remove the directory dirname, tolerating its absence."""
    try:
        _rmdir(dirname)
    except OSError as error:
        # The directory need not exist.
        if error.errno != errno.ENOENT:
            raise
def rmtree(path):
    """Recursively remove path, tolerating its absence."""
    try:
        _rmtree(path)
    except OSError as error:
        # Missing tree is fine; anything else is a real failure.
        if error.errno != errno.ENOENT:
            raise
def make_legacy_pyc(source):
    """Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location.

    The choice of .pyc or .pyo extension is done based on the __debug__ flag
    value.

    :param source: The file system path to the source file. The source file
        does not need to exist, however the PEP 3147 pyc file must exist.
    :return: The file system path to the legacy pyc file.
    """
    pyc_file = imp.cache_from_source(source)
    up_one = os.path.dirname(os.path.abspath(source))
    suffix = 'c' if __debug__ else 'o'
    legacy_pyc = os.path.join(up_one, source + suffix)
    os.rename(pyc_file, legacy_pyc)
    return legacy_pyc
def forget(modname):
    """'Forget' a module was ever imported.

    This removes the module from sys.modules and deletes any PEP 3147 or
    legacy .pyc and .pyo files.
    """
    unload(modname)
    for dirname in sys.path:
        source = os.path.join(dirname, modname + '.py')
        # It doesn't matter if they exist or not, unlink all possible
        # combinations of PEP 3147 and legacy pyc and pyo files.
        for legacy in (source + 'c', source + 'o'):
            unlink(legacy)
        unlink(imp.cache_from_source(source, debug_override=True))
        unlink(imp.cache_from_source(source, debug_override=False))
# On some platforms, should not run gui test even if it is allowed
# in `use_resources'.
# On Windows this asks the window station whether it is visible; services
# and non-interactive sessions report an invisible station, where GUI tests
# would hang or fail.
if sys.platform.startswith('win'):
    import ctypes
    import ctypes.wintypes
    def _is_gui_available():
        UOI_FLAGS = 1
        WSF_VISIBLE = 0x0001
        class USEROBJECTFLAGS(ctypes.Structure):
            _fields_ = [("fInherit", ctypes.wintypes.BOOL),
                        ("fReserved", ctypes.wintypes.BOOL),
                        ("dwFlags", ctypes.wintypes.DWORD)]
        dll = ctypes.windll.user32
        h = dll.GetProcessWindowStation()
        if not h:
            raise ctypes.WinError()
        uof = USEROBJECTFLAGS()
        needed = ctypes.wintypes.DWORD()
        res = dll.GetUserObjectInformationW(h,
            UOI_FLAGS,
            ctypes.byref(uof),
            ctypes.sizeof(uof),
            ctypes.byref(needed))
        if not res:
            raise ctypes.WinError()
        return bool(uof.dwFlags & WSF_VISIBLE)
else:
    # Non-Windows platforms: assume a GUI can always be used.
    def _is_gui_available():
        return True
def is_resource_enabled(resource):
    """Return True iff regrtest.py enabled resource via use_resources."""
    if use_resources is None:
        return False
    return resource in use_resources
def requires(resource, msg=None):
    """Raise ResourceDenied if the specified resource is not available.

    If the caller's module is __main__ then automatically return True. The
    possibility of False being returned occurs when regrtest.py is
    executing.
    """
    if resource == 'gui' and not _is_gui_available():
        raise unittest.SkipTest("Cannot use the 'gui' resource")
    # see if the caller's module is __main__ - if so, treat as if
    # the resource was set
    caller_globals = sys._getframe(1).f_globals
    if caller_globals.get("__name__") == "__main__":
        return
    if not is_resource_enabled(resource):
        if msg is None:
            msg = "Use of the %r resource not enabled" % resource
        raise ResourceDenied(msg)
def _requires_unix_version(sysname, min_version):
    """Decorator raising SkipTest if the OS is `sysname` and the version is less
    than `min_version`.

    For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
    the FreeBSD version is less than 7.2.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if platform.system() == sysname:
                version_txt = platform.release().split('-', 1)[0]
                try:
                    version = tuple(map(int, version_txt.split('.')))
                except ValueError:
                    # Unparseable release string: don't skip, just run.
                    pass
                else:
                    if version < min_version:
                        min_version_txt = '.'.join(map(str, min_version))
                        raise unittest.SkipTest(
                            "%s version %s or higher required, not %s"
                            % (sysname, min_version_txt, version_txt))
            # Bug fix: the wrapped function was never invoked, so every
            # decorated test silently did nothing and returned None.
            # (Compare requires_mac_ver below, which calls func correctly.)
            return func(*args, **kw)
        return wrapper
    return decorator
def requires_freebsd_version(*min_version):
    """Decorator raising SkipTest on FreeBSD older than min_version.

    For example, @requires_freebsd_version(7, 2) raises SkipTest if the
    FreeBSD version is less than 7.2.
    """
    return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
    """Decorator raising SkipTest on Linux older than min_version.

    For example, @requires_linux_version(2, 6, 32) raises SkipTest if the
    Linux version is less than 2.6.32.
    """
    return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
    """Decorator raising SkipTest on Mac OS X older than min_version.

    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is lesser than 10.5.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if sys.platform == 'darwin':
                version_txt = platform.mac_ver()[0]
                try:
                    version = tuple(map(int, version_txt.split('.')))
                except ValueError:
                    # Unparseable version string: don't skip, just run.
                    pass
                else:
                    if version < min_version:
                        raise unittest.SkipTest(
                            "Mac OS X %s or higher required, not %s"
                            % ('.'.join(map(str, min_version)), version_txt))
            return func(*args, **kw)
        wrapper.min_version = min_version
        return wrapper
    return decorator
HOST = 'localhost'  # loopback host used by the network-related test helpers
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Return an ephemeral port number that was free a moment ago.

    A throwaway socket of the given *family*/*socktype* is bound to
    (HOST, port 0) so the OS hands out an unused ephemeral port; the socket
    is then closed and that port number returned.

    Prefer bind_port() whenever the test itself creates the Python socket:
    between this function returning and the caller binding, the OS may give
    the same port to another process.  find_unused_port() exists for cases
    where the port must be handed to a constructor or an external program
    (e.g. the -accept argument to openssl's s_server mode).

    Hard-coded ports must *NEVER* be used: they break running multiple test
    instances on one host, and on Windows the differing SO_REUSEADDR
    semantics can wedge the whole process (use SO_EXCLUSIVEADDRUSE there —
    see http://bugs.python.org/issue2550 and
    http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx for the
    full story).
    """
    probe = socket.socket(family, socktype)
    port = bind_port(probe)
    probe.close()
    del probe
    return port
def bind_port(sock, host=HOST):
    """Bind *sock* to a free port on *host* and return the port number.

    Relies on ephemeral ports so that concurrent test runs (e.g. on a
    buildbot) do not collide.  For AF_INET/SOCK_STREAM sockets this raises
    TestFailed if SO_REUSEADDR or SO_REUSEPORT has been set: tests must
    never set those options on TCP/IP sockets (their only legitimate use is
    multicast over multiple UDP sockets).  Where SO_EXCLUSIVEADDRUSE exists
    (i.e. on Windows) it is set, preventing anyone else from binding our
    host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR') and \
                sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
            raise TestFailed("tests should never set the SO_REUSEADDR " \
                             "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT') and \
                sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
            raise TestFailed("tests should never set the SO_REUSEPORT " \
                             "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)

    sock.bind((host, 0))
    return sock.getsockname()[1]
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
return True
except (socket.error, socket.gaierror):
pass
finally:
if sock:
sock.close()
return False
# Result of the loopback bind probe above, computed once at import time.
IPV6_ENABLED = _is_ipv6_enabled()

# A constant likely larger than the underlying OS pipe buffer size.
# Windows limit seems to be around 512B, and many Unix kernels have a 64K pipe
# buffer size or 16*PAGE_SIZE: take a few megs to be sure.
PIPE_MAX_SIZE = 3 * 1000 * 1000

# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
    float.__getformat__("double").startswith("IEEE"),
    "test requires IEEE 754 doubles")

# Skip decorators for the optional compression modules; zlib/bz2/lzma are
# presumably the module objects (or None) bound by conditional imports
# earlier in this file — TODO confirm against the file header.
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')

requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')

requires_lzma = unittest.skipUnless(lzma, 'requires lzma')

# True when running under Jython.
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
    # Jython disallows @ in module names
    TESTFN = '$test'
else:
    TESTFN = '@test'

# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.  The candidates are tried in
# order; the first that round-trips through the filesystem encoding wins.
FS_NONASCII = None
for character in (
    # First try printable and common characters to have a readable filename.
    # For each character, the encoding list are just example of encodings able
    # to encode the character (the list is not exhaustive).

    # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
    '\u00E6',
    # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
    '\u0130',
    # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
    '\u0141',
    # U+03C6 (Greek Small Letter Phi): cp1253
    '\u03C6',
    # U+041A (Cyrillic Capital Letter Ka): cp1251
    '\u041A',
    # U+05D0 (Hebrew Letter Alef): Encodable to cp424
    '\u05D0',
    # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
    '\u060C',
    # U+062A (Arabic Letter Teh): cp720
    '\u062A',
    # U+0E01 (Thai Character Ko Kai): cp874
    '\u0E01',

    # Then try more "special" characters. "special" because they may be
    # interpreted or displayed differently depending on the exact locale
    # encoding and the font.

    # U+00A0 (No-Break Space)
    '\u00A0',
    # U+20AC (Euro Sign)
    '\u20AC',
):
    try:
        # Round-trip through the filesystem encoding; failure means this
        # character cannot appear in a filename on this platform.
        os.fsdecode(os.fsencode(character))
    except UnicodeError:
        pass
    else:
        FS_NONASCII = character
        break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
    # In Mac OS X's VFS API file names are, by definition, canonically
    # decomposed Unicode, encoded using UTF-8. See QA1173:
    # http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
    import unicodedata
    TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)

# Encoding used for filenames on this platform (e.g. 'utf-8').
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
    # skip win32s (0) or Windows 9x/ME (1)
    if sys.getwindowsversion().platform >= 2:
        # Different kinds of characters from various languages to minimize the
        # probability that the whole name is encodable to MBCS (issue #9819)
        TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
        try:
            TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
        except UnicodeEncodeError:
            # Good: the candidate really is unencodable.
            pass
        else:
            print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
                  'Unicode filename tests may not be effective'
                  % (TESTFN_UNENCODABLE, TESTFN_ENCODING))
            TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
    try:
        # ascii and utf-8 cannot encode the byte 0xff
        b'\xff'.decode(TESTFN_ENCODING)
    except UnicodeDecodeError:
        # 0xff will be encoded using the surrogate character u+DCFF
        TESTFN_UNENCODABLE = TESTFN \
            + b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
    else:
        # File system encoding (eg. ISO-8859-* encodings) can encode
        # the byte 0xff. Skip some unicode filename tests.
        pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
# NOTE: the probe loop below is disabled (string literal, not code) for
# Brython; TESTFN_UNDECODABLE therefore always stays None here.
''' #fixme brython
for name in (
    # b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
    # accepts it to create a file or a directory, or don't accept to enter to
    # such directory (when the bytes name is used). So test b'\xe7' first: it is
    # not decodable from cp932.
    b'\xe7w\xf0',
    # undecodable from ASCII, UTF-8
    b'\xff',
    # undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
    # and cp857
    b'\xae\xd5'
    # undecodable from UTF-8 (UNIX and Mac OS X)
    b'\xed\xb2\x80', b'\xed\xb4\x80',
    # undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
    # cp1253, cp1254, cp1255, cp1257, cp1258
    b'\x81\x98',
):
    try:
        name.decode(TESTFN_ENCODING)
    except UnicodeDecodeError:
        TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
        break
'''

# Non-ASCII variant of TESTFN, when the filesystem supports one.
if FS_NONASCII:
    TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
    TESTFN_NONASCII = None

# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False, path=None):
    """Context manager that temporarily changes the CWD.

    If *path* is given, chdir there and make no file-system changes.
    Otherwise a directory called *name* is created under the current
    directory, used as the CWD, and removed again on exit.  When *quiet*
    is false (the default) failures to create or enter the directory
    raise; when true they only emit a RuntimeWarning and the original CWD
    is used.  Yields the resulting current working directory.
    """
    original_cwd = os.getcwd()
    made_dir = False
    if path is None:
        path = name
        try:
            os.mkdir(name)
            made_dir = True
        except OSError:
            if not quiet:
                raise
            warnings.warn('tests may fail, unable to create temp CWD ' + name,
                          RuntimeWarning, stacklevel=3)
    try:
        os.chdir(path)
    except OSError:
        if not quiet:
            raise
        warnings.warn('tests may fail, unable to change the CWD to ' + path,
                      RuntimeWarning, stacklevel=3)
    try:
        yield os.getcwd()
    finally:
        os.chdir(original_cwd)
        # Only remove the directory we created ourselves.
        if made_dir:
            rmtree(name)
if hasattr(os, "umask"):
    @contextlib.contextmanager
    def temp_umask(umask):
        """Context manager that temporarily sets the process umask."""
        saved = os.umask(umask)
        try:
            yield
        finally:
            os.umask(saved)
def findfile(file, here=__file__, subdir=None):
    """Try to find a file on sys.path and the working directory.  If it is not
    found the argument passed to the function is returned (this does not
    necessarily signal failure; could still be the legitimate path).

    *here* anchors the search at this module's directory first; *subdir*
    is joined in front of the filename before searching.
    """
    if os.path.isabs(file):
        return file
    if subdir is not None:
        file = os.path.join(subdir, file)
    for directory in [os.path.dirname(here)] + sys.path:
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return file
def create_empty_file(filename):
    """Create an empty file. If the file already exists, truncate it."""
    # O_TRUNC guarantees an existing file ends up zero-length.
    os.close(os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC))
def sortdict(dict):
    "Like repr(dict), but in sorted order."
    body = ", ".join("%r: %r" % pair for pair in sorted(dict.items()))
    return "{%s}" % body
def make_bad_fd():
    """Return a file descriptor that is no longer valid.

    Opens TESTFN, grabs its fd, then closes the file (and removes TESTFN)
    before returning, so the returned fd refers to nothing.
    """
    handle = open(TESTFN, "wb")
    try:
        return handle.fileno()
    finally:
        handle.close()
        unlink(TESTFN)
def check_syntax_error(testcase, statement):
    """Assert via *testcase* that compiling *statement* raises SyntaxError."""
    testcase.assertRaises(SyntaxError, compile, statement,
                          '<test string>', 'exec')
def open_urlresource(url, *args, **kw):
    """Open (downloading and caching first, if necessary) the resource at *url*.

    The file is cached under the "data" directory next to this module.
    A 'check' callable may be supplied via **kw: given the open file it
    must return a true value for a valid resource (the file is rewound
    before being returned) or a false value to discard and re-fetch it.
    Downloading requires the 'urlfetch' test resource; raises TestFailed
    if the freshly fetched file still fails validation.
    """
    import urllib.request, urllib.parse

    check = kw.pop('check', None)

    filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!

    fn = os.path.join(os.path.dirname(__file__), "data", filename)

    def validated(path):
        # Return an open file for a cached copy that passes 'check',
        # otherwise close it and return None.
        f = open(path, *args, **kw)
        if check is None:
            return f
        if check(f):
            f.seek(0)
            return f
        f.close()

    if os.path.exists(fn):
        f = validated(fn)
        if f is not None:
            return f
        unlink(fn)

    # Verify the requirement before downloading the file
    requires('urlfetch')
    print('\tfetching %s ...' % url, file=get_original_stdout())
    f = urllib.request.urlopen(url, timeout=15)
    try:
        with open(fn, "wb") as out:
            s = f.read()
            while s:
                out.write(s)
                s = f.read()
    finally:
        f.close()

    f = validated(fn)
    if f is not None:
        return f
    raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
    """Convenience wrapper for the warnings list returned on
    entry to the warnings.catch_warnings() context manager.
    """
    def __init__(self, warnings_list):
        self._warnings = warnings_list  # live list filled by catch_warnings
        self._last = 0                  # index of first not-yet-seen entry

    def __getattr__(self, attr):
        # Delegate unknown attributes to the most recent warning; known
        # WarningMessage fields read as None while nothing new is recorded.
        if len(self._warnings) > self._last:
            return getattr(self._warnings[-1], attr)
        if attr in warnings.WarningMessage._WARNING_DETAILS:
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    @property
    def warnings(self):
        # Warnings recorded since the last reset().
        return self._warnings[self._last:]

    def reset(self):
        self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.

    NOTE: this is a generator, consumed by check_warnings() through the
    contextlib.contextmanager machinery — it yields a WarningsRecorder
    once and performs the filter bookkeeping after being resumed.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    # NOTE: _getframe(2) assumes a fixed call depth (generator ->
    # _GeneratorContextManager.__enter__ -> user code).
    frame = sys._getframe(2)
    registry = frame.f_globals.get('__warningregistry__')
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings. Because
        # test_warnings swap the module, we need to look up in
        # the sys.modules dictionary.
        sys.modules['warnings'].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = list(w)
    missing = []
    for msg, cat in filters:
        seen = False
        # Note: the loop variable deliberately re-binds 'w' (the recorded
        # list is already copied into 'reraise' above).
        for w in reraise[:]:
            warning = w.message
            # Filter out the matching messages
            if (re.match(msg, str(warning), re.I) and
                issubclass(warning.__class__, cat)):
                seen = True
                reraise.remove(w)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %s" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" %
                             missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)
    """
    quiet = kwargs.get('quiet')
    if not filters:
        # No filters given: match every warning and, for backward
        # compatibility, default to quiet mode.
        filters = (("", Warning),)
        quiet = True if quiet is None else quiet
    # _filterwarnings is itself a generator, so returning it here keeps
    # the @contextmanager protocol intact.
    return _filterwarnings(filters, quiet)
class CleanImport(object):
    """Context manager to force import to return a new module reference.

    This is useful for testing module-level behaviours, such as
    the emission of a DeprecationWarning on import.

    Use like this:

        with CleanImport("foo"):
            importlib.import_module("foo") # new reference
    """

    def __init__(self, *module_names):
        # Snapshot sys.modules so __exit__ can restore the originals.
        self.original_modules = sys.modules.copy()
        for module_name in module_names:
            if module_name not in sys.modules:
                continue
            module = sys.modules[module_name]
            # module_name may merely alias another module (e.g. a stub
            # for a module renamed in 3.x); drop the real one too so the
            # import cache is truly cold.
            if module.__name__ != module_name:
                del sys.modules[module.__name__]
            del sys.modules[module_name]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
    """Class to help protect the environment variable properly. Can be used as
    a context manager."""

    def __init__(self):
        self._environ = os.environ
        self._changed = {}  # var -> value it had before our first change

    def __getitem__(self, envvar):
        return self._environ[envvar]

    def __setitem__(self, envvar, value):
        # Record the pre-existing value only the first time we touch it.
        self._changed.setdefault(envvar, self._environ.get(envvar))
        self._environ[envvar] = value

    def __delitem__(self, envvar):
        # Record the pre-existing value only the first time we touch it.
        self._changed.setdefault(envvar, self._environ.get(envvar))
        if envvar in self._environ:
            del self._environ[envvar]

    def keys(self):
        return self._environ.keys()

    def __iter__(self):
        return iter(self._environ)

    def __len__(self):
        return len(self._environ)

    def set(self, envvar, value):
        self[envvar] = value

    def unset(self, envvar):
        del self[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        # Undo every recorded change: None means "was absent".
        for (k, v) in self._changed.items():
            if v is None:
                if k in self._environ:
                    del self._environ[k]
            else:
                self._environ[k] = v
        os.environ = self._environ
class DirsOnSysPath(object):
    """Context manager to temporarily add directories to sys.path.

    This makes a copy of sys.path, appends any directories given
    as positional arguments, then reverts sys.path to the copied
    settings when the context ends.

    Note that *all* sys.path modifications in the body of the
    context manager, including replacement of the object,
    will be reverted at the end of the block.
    """

    def __init__(self, *paths):
        # Keep both the list object and a snapshot of its contents:
        # __exit__ restores the object *and* its previous contents.
        self.original_value = sys.path[:]
        self.original_object = sys.path
        sys.path.extend(paths)

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.path = self.original_object
        sys.path[:] = self.original_value
class TransientResource(object):
    """Raise ResourceDenied if an exception is raised while the context manager
    is in effect that matches the specified exception and attributes."""

    def __init__(self, exc, **kwargs):
        self.exc = exc
        self.attrs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, type_=None, value=None, traceback=None):
        """If type_ is a subclass of self.exc and value has attributes matching
        self.attrs, raise ResourceDenied.  Otherwise let the exception
        propagate (if any)."""
        # NOTE(review): the code checks issubclass(self.exc, type_) — the
        # reverse of what the docstring says.  Preserved as-is; confirm
        # against callers before changing.
        if type_ is None or not issubclass(self.exc, type_):
            return
        for attr, expected in self.attrs.items():
            if not hasattr(value, attr):
                return
            if getattr(value, attr) != expected:
                return
        raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions.

    *timeout* (None to disable) is installed as the default socket timeout
    for the duration of the block.  *errnos* overrides the default list of
    errno values treated as transient network failures.
    """
    # (name, fallback-number) pairs: the number is used when the platform's
    # errno/socket module does not define the symbolic name.
    default_errnos = [
        ('ECONNREFUSED', 111),
        ('ECONNRESET', 104),
        ('EHOSTUNREACH', 113),
        ('ENETUNREACH', 101),
        ('ETIMEDOUT', 110),
    ]
    default_gai_errnos = [
        ('EAI_AGAIN', -3),
        ('EAI_FAIL', -4),
        ('EAI_NONAME', -2),
        ('EAI_NODATA', -5),
        # Encountered when trying to resolve IPv6-only hostnames
        ('WSANO_DATA', 11004),
    ]

    denied = ResourceDenied("Resource %r is not available" % resource_name)
    captured_errnos = errnos
    gai_errnos = []
    if not captured_errnos:
        captured_errnos = [getattr(errno, name, num)
                           for (name, num) in default_errnos]
        gai_errnos = [getattr(socket, name, num)
                      for (name, num) in default_gai_errnos]

    def filter_error(err):
        # Re-raise transient network failures as ResourceDenied (chained
        # to the original error); anything else falls through untouched.
        n = getattr(err, 'errno', None)
        if (isinstance(err, socket.timeout) or
            (isinstance(err, socket.gaierror) and n in gai_errnos) or
            n in captured_errnos):
            if not verbose:
                sys.stderr.write(denied.args[0] + "\n")
            raise denied from err

    old_timeout = socket.getdefaulttimeout()
    try:
        if timeout is not None:
            socket.setdefaulttimeout(timeout)
        yield
    except IOError as err:
        # urllib can wrap original socket errors multiple times (!), we must
        # unwrap to get at the original error.
        while True:
            a = err.args
            if len(a) >= 1 and isinstance(a[0], IOError):
                err = a[0]
            # The error can also be wrapped as args[1]:
            #    except socket.error as msg:
            #        raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
            elif len(a) >= 2 and isinstance(a[1], IOError):
                err = a[1]
            else:
                break
        filter_error(err)
        raise
    # XXX should we catch generic exceptions and look for their
    # __cause__ or __context__?
    finally:
        socket.setdefaulttimeout(old_timeout)
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO."""
    import io
    saved = getattr(sys, stream_name)
    setattr(sys, stream_name, io.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, saved)

def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as s:
           print("hello")
       self.assertEqual(s.getvalue(), "hello")
    """
    return captured_output("stdout")

def captured_stderr():
    # Same as captured_stdout(), for sys.stderr.
    return captured_output("stderr")

def captured_stdin():
    # Same as captured_stdout(), for sys.stdin.
    return captured_output("stdin")
def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector.  (Even in
    CPython this can be the case in case of reference cycles.)  This means
    that __del__ methods may be called later than expected and weakrefs may
    remain alive for longer than expected.  This function tries its best to
    force all garbage objects to disappear.
    """
    gc.collect()
    # Jython's collector runs asynchronously: give it a moment, then sweep
    # twice more to pick up newly unreachable cycles.
    if is_jython:
        time.sleep(0.1)
    gc.collect()
    gc.collect()
@contextlib.contextmanager
def disable_gc():
    """Context manager disabling the cyclic GC for the duration of the block,
    restoring the previous enabled/disabled state afterwards."""
    was_enabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if was_enabled:
            gc.enable()
def python_is_optimized():
    """Find if Python was built with optimizations."""
    cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
    # The last -O flag on the compiler command line wins.
    last_O = ""
    for token in cflags.split():
        if token.startswith('-O'):
            last_O = token
    return last_O not in ("", "-O0")
# struct format codes describing the header of every PyObject; debug
# (Py_TRACE_REFS) builds carry two extra pointers, detected via
# sys.gettotalrefcount which only exists there.
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
    _header = '2P' + _header
    _align = '0P'
_vheader = _header + 'n'

def calcobjsize(fmt):
    """Return the expected sizeof for a fixed-size object whose payload
    layout is the struct format *fmt*."""
    return struct.calcsize(_header + fmt + _align)

def calcvobjsize(fmt):
    """Return the expected sizeof for a variable-size object (PyVarObject)
    whose payload layout is the struct format *fmt*."""
    return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9

def check_sizeof(test, o, size):
    """Assert via *test* that sys.getsizeof(o) equals *size*, adding the
    hidden PyGC_Head size for GC-tracked objects (requires _testcapi)."""
    result = sys.getsizeof(o)
    # A type object is GC-headed when it is a heap type; an instance when
    # its type has the HAVE_GC flag.
    if type(o) == type:
        gc_headed = bool(o.__flags__ & _TPFLAGS_HEAPTYPE)
    else:
        gc_headed = bool(type(o).__flags__ & _TPFLAGS_HAVE_GC)
    if gc_headed:
        size += _testcapi.SIZEOF_PYGC_HEAD
    msg = ('wrong size for %s: got %d, expected %d'
           % (type(o), result, size))
    test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.

def run_with_locale(catstr, *locales):
    """Decorator: run the wrapped test with the first settable locale from
    *locales* for category *catstr* (e.g. 'LC_ALL'), restoring the original
    locale afterwards.  If no locale can be set, the test runs unchanged.
    """
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # cannot retrieve original locale, so do nothing
                # (bare except is deliberate: setlocale failures vary by
                # platform; 'locale' is rebound so the finally-check skips)
                locale = orig_locale = None
            else:
                # Try each candidate locale until one sticks.
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        inner.__name__ = func.__name__
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.

def run_with_tz(tz):
    """Decorator: run the wrapped test with os.environ['TZ'] set to *tz*,
    restoring the previous TZ (and re-applying it via time.tzset()) on the
    way out.  Skips the test on platforms without time.tzset() (Windows).
    """
    def decorator(func):
        def inner(*args, **kwds):
            try:
                tzset = time.tzset
            except AttributeError:
                raise unittest.SkipTest("tzset required")
            if 'TZ' in os.environ:
                orig_tz = os.environ['TZ']
            else:
                orig_tz = None
            os.environ['TZ'] = tz
            tzset()

            # now run the function, resetting the tz on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if orig_tz is None:
                    del os.environ['TZ']
                else:
                    os.environ['TZ'] = orig_tz
                # Re-apply whatever TZ is now in effect.
                time.tzset()

        inner.__name__ = func.__name__
        inner.__doc__ = func.__doc__
        return inner
    return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.

# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G

MAX_Py_ssize_t = sys.maxsize

def set_memlimit(limit):
    """Parse a limit string such as '2G', '512m' or '2.5Gb' and set the
    module globals ``max_memuse`` / ``real_max_memuse`` accordingly.

    Raises ValueError for an unparsable string or a limit below 2 GiB - 1
    (anything smaller is useless for bigmem tests).  Values above the
    platform's Py_ssize_t maximum are clamped for ``max_memuse`` while
    ``real_max_memuse`` keeps the requested figure.
    """
    global max_memuse
    global real_max_memuse
    units = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024*_1G,
    }
    match = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                     re.IGNORECASE | re.VERBOSE)
    if match is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    memlimit = int(float(match.group(1)) * units[match.group(3).lower()])
    real_max_memuse = memlimit
    memlimit = min(memlimit, MAX_Py_ssize_t)
    if memlimit < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = memlimit
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
    """Decorator for bigmem tests.

    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it.

    if 'dry_run' is False, it means the test doesn't support dummy runs
    when -M is not specified.

    The wrapped test method receives the computed maximum size as its
    single extra argument; size/memuse are exposed on the wrapper.
    """
    def decorator(f):
        def wrapper(self):
            size = wrapper.size
            memuse = wrapper.memuse
            if not real_max_memuse:
                # No -M limit configured: dry run with a token size.
                maxsize = 5147
            else:
                maxsize = size

            if ((real_max_memuse or not dry_run)
                and real_max_memuse < maxsize * memuse):
                raise unittest.SkipTest(
                    "not enough memory: %.1fG minimum needed"
                    % (size * memuse / (1024 ** 3)))

            if real_max_memuse and verbose:
                print()
                print(" ... expected peak memory use: {peak:.1f}G"
                      .format(peak=size * memuse / (1024 ** 3)))
                # Watch actual consumption while the test runs.
                watchdog = _MemoryWatchdog()
                watchdog.start()
            else:
                watchdog = None

            try:
                return f(self, maxsize)
            finally:
                if watchdog:
                    watchdog.stop()

        wrapper.size = size
        wrapper.memuse = memuse
        return wrapper
    return decorator
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space."""
    def wrapper(self):
        # Enough memory configured to exhaust the address space: run it.
        if max_memuse >= MAX_Py_ssize_t:
            return f(self)
        if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
            raise unittest.SkipTest(
                "not enough memory: try a 32-bit build instead")
        raise unittest.SkipTest(
            "not enough memory: %.1fG minimum needed"
            % (MAX_Py_ssize_t / (1024 ** 3)))
    return wrapper
#=======================================================================
# unittest integration.

class BasicTestRunner:
    """Minimal runner: execute the suite into a plain TestResult, no output."""
    def run(self, test):
        result = unittest.TestResult()
        test(result)
        return result
def _id(obj):
return obj
def requires_resource(resource):
    """Return a decorator that skips the test unless *resource* is enabled."""
    if resource == 'gui' and not _is_gui_available():
        return unittest.skip("resource 'gui' is not available")
    if is_resource_enabled(resource):
        return _id
    return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
    """Decorator marking *test* as applicable on CPython only."""
    return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
    """Skip-decorator factory for implementation-detail tests.

    With no guards the test runs on CPython only; guards like jython=True
    or cpython=False select other implementations.  *msg* overrides the
    generated skip message ({0} is filled with the platform names).
    """
    if check_impl_detail(**guards):
        # Current implementation matches: no skipping needed.
        return _id
    if msg is None:
        guardnames, default = _parse_guards(guards)
        if default:
            msg = "implementation detail not available on {0}"
        else:
            msg = "implementation detail specific to {0}"
        msg = msg.format(' or '.join(sorted(guardnames.keys())))
    return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
    """This function returns True or False depending on the host platform.
       Examples:
          if check_impl_detail():               # only on CPython (default)
          if check_impl_detail(jython=True):    # only on Jython
          if check_impl_detail(cpython=False):  # everywhere except on CPython
    """
    parsed, default = _parse_guards(guards)
    implementation = platform.python_implementation().lower()
    return parsed.get(implementation, default)
def no_tracing(func):
    """Decorator to temporarily turn off tracing for the duration of a test."""
    if not hasattr(sys, 'gettrace'):
        # No tracing support at all: nothing to guard against.
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        saved_trace = sys.gettrace()
        try:
            sys.settrace(None)
            return func(*args, **kwargs)
        finally:
            sys.settrace(saved_trace)
    return wrapper
def refcount_test(test):
    """Decorator for tests which involve reference counting.

    To start, the decorator does not run the test if is not run by CPython.
    After that, any trace function is unset during the test to prevent
    unexpected refcounts caused by the trace function.
    """
    return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class."""
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
                                         failfast=failfast)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if result.wasSuccessful():
        return
    # Summarize a single error/failure verbatim; anything more gets a
    # generic message (details require verbose mode).
    if len(result.errors) == 1 and not result.failures:
        err = result.errors[0][1]
    elif len(result.failures) == 1 and not result.errors:
        err = result.failures[0][1]
    else:
        err = "multiple errors occurred"
        if not verbose: err += "; run in verbose mode for details"
    raise TestFailed(err)
def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes.

    Arguments may be TestCase subclasses, pre-built suites/cases, or strings
    naming already-imported modules to scan for test cases.
    """
    valid_types = (unittest.TestSuite, unittest.TestCase)
    suite = unittest.TestSuite()
    for cls in classes:
        if isinstance(cls, str):
            # A string names a module that must already be imported.
            if cls in sys.modules:
                suite.addTest(unittest.findTestCases(sys.modules[cls]))
            else:
                raise ValueError("str arguments must be keys in sys.modules")
        elif isinstance(cls, valid_types):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    def case_pred(test):
        # `match_tests` is a module-level glob pattern; None disables filtering.
        if match_tests is None:
            return True
        # Match the pattern against each dotted component of the test id.
        for name in test.id().split("."):
            if fnmatch.fnmatchcase(name, match_tests):
                return True
        return False
    _filter_suite(suite, case_pred)
    _run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
# Docstrings are present unless running CPython compiled without them
# (WITH_DOC_STRINGS unset); other implementations and win32 always keep them.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
                   sys.platform == 'win32' or
                   sysconfig.get_config_var('WITH_DOC_STRINGS'))

# Decorator for tests that introspect docstrings.
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
                                          "test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
    """Run doctest on the given module.  Return (#failures, #tests).
    If optional argument verbosity is not specified (or is None), pass
    support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).
    """
    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        # Passing verbose=None makes doctest fall back to its own default
        # behavior (scanning sys.argv for -v).
        verbosity = None

    f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
    if f:
        raise TestFailed("%d of %d doctests failed" % (f, t))
    if verbose:
        print('doctest (%s) ... %d tests with zero failures' %
              (module.__name__, t))
    return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
    """Snapshot sys.modules, returned as a 1-tuple for modules_cleanup()."""
    snapshot = sys.modules.copy()
    return (snapshot,)
def modules_cleanup(oldmodules):
    """Restore sys.modules to the snapshot taken by modules_setup()."""
    # Encoders/decoders are registered permanently within the internal
    # codec cache. If we destroy the corresponding modules their
    # globals will be set to None which will trip up the cached functions.
    encodings = [(k, v) for k, v in sys.modules.items()
                 if k.startswith('encodings.')]
    sys.modules.clear()
    sys.modules.update(encodings)
    # XXX: This kind of problem can affect more than just encodings. In particular
    # extension modules (such as _ssl) don't cope with reloading properly.
    # Really, test modules should be cleaning out the test specific modules they
    # know they added (ala test_runpy) rather than relying on this function (as
    # test_importhooks and test_pkg do currently).
    # Implicitly imported *real* modules should be left alone (see issue 10556).
    sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
    # Return (live thread count, snapshot of dangling threads); the fallback
    # (1, ()) is used when the _thread module is unavailable.
    if _thread:
        return _thread._count(), threading._dangling.copy()
    else:
        return 1, ()
def threading_cleanup(*original_values):
    """Wait (bounded) for thread counts to return to their setup values."""
    if not _thread:
        return
    # Poll up to _MAX_COUNT times (~1 second total) for stray threads to exit.
    _MAX_COUNT = 10
    for count in range(_MAX_COUNT):
        values = _thread._count(), threading._dangling
        if values == original_values:
            break
        time.sleep(0.1)
        # Collecting garbage may release thread objects keeping counts high.
        gc_collect()
    # XXX print a warning in case of failure?
def reap_threads(func):
    """Use this function when threads are being used.  This will
    ensure that the threads are cleaned up even when the test fails.
    If threading is unavailable this function does nothing.
    """
    # Without _thread there is nothing to track; return the function unwrapped.
    if not _thread:
        return func

    @functools.wraps(func)
    def decorator(*args):
        # Snapshot thread state before, and wait for it to settle after,
        # regardless of whether the wrapped test raises.
        key = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*key)
    return decorator
def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.
    """
    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if hasattr(os, 'waitpid'):
        any_process = -1
        while True:
            try:
                # This will raise an exception on Windows.  That's ok.
                pid, status = os.waitpid(any_process, os.WNOHANG)
                if pid == 0:
                    break
            except OSError:
                # Bug fix: the original bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit.  Only OS-level failures
                # (no more children, or unsupported semantics on Windows)
                # should terminate the reaping loop.
                break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
    """Temporarily swap out an attribute with a new object.

    Usage:
        with swap_attr(obj, "attr", 5):
            ...

    Inside the with-block, obj.attr is new_val; on exit the previous value is
    restored.  If `attr` did not exist on `obj`, it is created for the block
    and deleted afterwards.
    """
    # A private sentinel distinguishes "attribute absent" from any real value.
    _absent = object()
    previous = getattr(obj, attr, _absent)
    setattr(obj, attr, new_val)
    try:
        yield
    finally:
        if previous is _absent:
            delattr(obj, attr)
        else:
            setattr(obj, attr, previous)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
    """Temporarily swap out a mapping item with a new object.

    Usage:
        with swap_item(obj, "item", 5):
            ...

    Inside the with-block, obj["item"] is new_val; on exit the previous value
    is restored.  If `item` did not exist in `obj`, it is created for the
    block and deleted afterwards.
    """
    # A private sentinel distinguishes "key absent" from any real value.
    _absent = object()
    previous = obj[item] if item in obj else _absent
    obj[item] = new_val
    try:
        yield
    finally:
        if previous is _absent:
            del obj[item]
        else:
            obj[item] = previous
def strip_python_stderr(stderr):
    """Strip the stderr of a Python process from potential debug output
    emitted by the interpreter.

    This will typically be run on the result of the communicate() method
    of a subprocess.Popen object.
    """
    # Debug builds append "[N refs]" lines; drop them, then trim whitespace.
    without_refs = re.sub(br"\[\d+ refs\]\r?\n?", b"", stderr)
    return without_refs.strip()
def args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Delegates to subprocess's private helper so child interpreters can be
    # spawned with matching flags.
    return subprocess._args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
    """Logging handler that records each emitted record's attribute dict so
    tests can later query them through a matcher object."""

    def __init__(self, matcher):
        # BufferingHandler takes a "capacity" used by shouldFlush(); since
        # shouldFlush() is overridden below, a capacity of zero is fine.
        # Call flush() manually to clear out the buffer.
        logging.handlers.BufferingHandler.__init__(self, 0)
        self.matcher = matcher

    def shouldFlush(self):
        # Never flush automatically; records accumulate until flush().
        return False

    def emit(self, record):
        # format() fills in derived attributes (record.message, ...) before
        # the record's dict is snapshotted.
        self.format(record)
        self.buffer.append(record.__dict__)

    def matches(self, **kwargs):
        """
        Look for a saved dict whose keys/values match the supplied arguments.
        """
        return any(self.matcher.matches(saved, **kwargs)
                   for saved in self.buffer)
class Matcher(object):
    # Keys whose string values are compared by substring rather than equality.
    _partial_matches = ('msg', 'message')

    def matches(self, d, **kwargs):
        """
        Try to match a single dict with the supplied arguments.

        Keys whose values are strings and which are in self._partial_matches
        will be checked for partial (i.e. substring) matches. You can extend
        this scheme to (for example) do regular expression matching, etc.
        """
        return all(self.match_value(key, d.get(key), wanted)
                   for key, wanted in kwargs.items())

    def match_value(self, k, dv, v):
        """
        Try to match a single stored value (dv) with a supplied value (v).
        """
        # Different types never match (e.g. 3 vs '3').
        if type(v) != type(dv):
            return False
        # Substring semantics only for designated string-valued keys.
        if type(dv) is str and k in self._partial_matches:
            return dv.find(v) >= 0
        return v == dv
# Cached result of can_symlink(); None means "not yet probed".
_can_symlink = None
def can_symlink():
    """Return True if the current OS/user can create symlinks (cached)."""
    global _can_symlink
    if _can_symlink is not None:
        return _can_symlink
    symlink_path = TESTFN + "can_symlink"
    try:
        os.symlink(TESTFN, symlink_path)
        can = True
    except (OSError, NotImplementedError, AttributeError):
        # No privilege/support (OSError), platform without symlinks
        # (NotImplementedError), or os.symlink missing (AttributeError).
        can = False
    else:
        os.remove(symlink_path)
    _can_symlink = can
    return can
def skip_unless_symlink(test):
    """Skip decorator for tests that require functional symlink"""
    if can_symlink():
        return test
    return unittest.skip("Requires functional symlink implementation")(test)
# Cached result of can_xattr(); None means "not yet probed".
_can_xattr = None
def can_xattr():
    """Return True if the platform supports extended attributes (cached)."""
    global _can_xattr
    if _can_xattr is not None:
        return _can_xattr
    if not hasattr(os, "setxattr"):
        can = False
    else:
        tmp_fp, tmp_name = tempfile.mkstemp()
        try:
            with open(TESTFN, "wb") as fp:
                try:
                    # TESTFN & tempfile may use different file systems with
                    # different capabilities
                    os.setxattr(tmp_fp, b"user.test", b"")
                    os.setxattr(fp.fileno(), b"user.test", b"")
                    # Kernels < 2.6.39 don't respect setxattr flags.
                    kernel_version = platform.release()
                    # Bug fix: use a raw string with escaped dots so "2.6."
                    # is matched literally (the old pattern let '.' match
                    # any character and used an invalid \d escape).
                    m = re.match(r"2\.6\.(\d{1,2})", kernel_version)
                    can = m is None or int(m.group(1)) >= 39
                except OSError:
                    can = False
        finally:
            # Bug fix: close the mkstemp() descriptor -- the original code
            # leaked one fd per call.
            os.close(tmp_fp)
            unlink(TESTFN)
            unlink(tmp_name)
    _can_xattr = can
    return can
def skip_unless_xattr(test):
    """Skip decorator for tests that require functional extended attributes"""
    if can_xattr():
        return test
    return unittest.skip("no non-broken extended attribute support")(test)
# Windows pops up a crash-report dialog when a child process faults, which
# hangs unattended test runs; suppress it for the duration of the context.
if sys.platform.startswith('win'):
    @contextlib.contextmanager
    def suppress_crash_popup():
        """Disable Windows Error Reporting dialogs using SetErrorMode."""
        # see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621%28v=vs.85%29.aspx
        # GetErrorMode is not available on Windows XP and Windows Server 2003,
        # but SetErrorMode returns the previous value, so we can use that
        import ctypes
        k32 = ctypes.windll.kernel32
        SEM_NOGPFAULTERRORBOX = 0x02
        old_error_mode = k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
        # Re-apply with the original bits OR'ed in, so existing flags survive.
        k32.SetErrorMode(old_error_mode | SEM_NOGPFAULTERRORBOX)
        try:
            yield
        finally:
            # Restore whatever error mode was active before entering.
            k32.SetErrorMode(old_error_mode)
else:
    # this is a no-op for other platforms
    @contextlib.contextmanager
    def suppress_crash_popup():
        yield
def patch(test_instance, object_to_patch, attr_name, new_value):
    """Override 'object_to_patch'.'attr_name' with 'new_value'.

    Also, add a cleanup procedure to 'test_instance' to restore
    'object_to_patch' value for 'attr_name'.
    The 'attr_name' should be a valid attribute for 'object_to_patch'.
    """
    # Probe first: raises AttributeError if the attribute does not exist,
    # matching the documented contract.
    getattr(object_to_patch, attr_name)

    # Record whether the attribute lives directly on the object (local) or
    # is inherited; inherited attributes are restored by deletion.
    attr_is_local = False
    old_value = getattr(object_to_patch, attr_name, None)
    try:
        if attr_name in object_to_patch.__dict__:
            old_value = object_to_patch.__dict__[attr_name]
            attr_is_local = True
    except AttributeError:
        # Some objects have no __dict__ at all.
        pass

    def cleanup():
        if attr_is_local:
            setattr(object_to_patch, attr_name, old_value)
        else:
            delattr(object_to_patch, attr_name)

    # Restore the value when the test is done, then apply the override.
    test_instance.addCleanup(cleanup)
    setattr(object_to_patch, attr_name, new_value)
| gpl-2.0 |
PythonSanSebastian/epcon | conference/forms.py | 1 | 25437 | # -*- coding: UTF-8 -*-
from django import forms
from django.conf import settings as dsettings
from django.contrib.admin import widgets as admin_widgets
from django.core import mail
from django.db import transaction
from django.forms import widgets
from django.forms.util import flatatt
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from conference import models
from conference import settings
from p3 import utils as p3utils
from taggit.forms import TagField
import logging
log = logging.getLogger('conference.tags')
### Helpers
def mass_mail(messages, data, addresses, feedback_address):
    """Send `messages` in bulk, then email a recap to `feedback_address`.

    Runs as the target of a child process started by
    AdminSendMailForm.send_emails(), so it must be self-contained.
    """
    # Mass send the emails
    mail.send_mass_mail(messages)

    # Send feedback mail
    ctx = dict(data)
    ctx['addresses'] = '\n'.join(addresses)
    feedback_email = ("""
message sent
-------------------------------
FROM: %(from_)s
SUBJECT: %(subject)s
BODY:
%(body)s
-------------------------------
sent to:
%(addresses)s
""" % ctx)
    mail.send_mail(
        '[%s] feedback mass mailing (admin stats)' % settings.CONFERENCE,
        feedback_email,
        dsettings.DEFAULT_FROM_EMAIL,
        recipient_list=[feedback_address],
    )
###
def validate_tags(tags):
    """
    Returns only tags that are already present in the database
    and limits the results to 5
    """
    known = models.ConferenceTag.objects.filter(name__in=tags).values_list('name', flat=True)
    accepted = known[:5]
    log.debug(u'validated tags: {}'.format(u', '.join(accepted)))
    return accepted
class TagWidget(widgets.TextInput):
    """Text input enhanced client-side by the tag-it jQuery plugin."""

    def _media(self):
        # Ship the tag-it assets alongside the widget.
        return forms.Media(
            js=('conference/tag-it/js/tag-it.js',),
            css={'all': ('conference/tag-it/css/jquery.tagit.css',)},
        )
    media = property(_media)

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        else:
            # Normalize any iterable of strings / ConferenceTag / TaggedItem
            # objects into a comma-separated string.
            if not isinstance(value, basestring):
                names = []
                for v in value:
                    if isinstance(v, basestring):
                        names.append(v)
                    elif isinstance(v, models.ConferenceTag):
                        names.append(v.name)
                    else:
                        names.append(v.tag.name)
                value = ','.join(names)
        final_attrs = self.build_attrs(attrs, type='text', name=name)
        # 'tag-field' is the hook class the tag-it javascript binds to.
        final_attrs['class'] = (final_attrs.get('class', '') + ' tag-field').strip()
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(self._format_value(value))
        return mark_safe(u'<input%s />' % flatatt(final_attrs))
class ReadonlyTagWidget(widgets.TextInput):
    """Like TagWidget but rendered read-only via an inline setup script."""

    def render(self, name, value, attrs=None):
        if value is None:
            value = ''
        else:
            # Normalize any iterable of strings / ConferenceTag / TaggedItem
            # objects into a comma-separated string (same as TagWidget).
            if not isinstance(value, basestring):
                names = []
                for v in value:
                    if isinstance(v, basestring):
                        names.append(v)
                    elif isinstance(v, models.ConferenceTag):
                        names.append(v.name)
                    else:
                        names.append(v.tag.name)
                value = ','.join(names)
        final_attrs = self.build_attrs(attrs, type='text', name=name)
        final_attrs['class'] = (final_attrs.get('class', '') + ' readonly-tag-field').strip()
        if value != '':
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_unicode(self._format_value(value))
        # setup_tag_field() is expected to be defined in page javascript.
        return mark_safe(u'<input%s /><script>setup_tag_field("#%s")</script>' % (flatatt(final_attrs), final_attrs['id']))
# MarkEditWidget: adapted from the example code available at
# http://tstone.github.com/jquery-markedit/
class MarkEditWidget(forms.Textarea):
    """Textarea enhanced client-side by the jquery-markedit Markdown editor."""

    class Media:
        css = {
            'all': ('conference/jquery-markedit/jquery.markedit.css',),
        }
        js = (
            'conference/jquery-markedit/showdown.js',
            'conference/jquery-markedit/jquery.markedit.js',
        )

    def render(self, name, value, attrs=None):
        # Copy attrs before mutating so the caller's dict is untouched.
        if attrs is None:
            attrs = {}
        else:
            attrs = dict(attrs)
        # 'markedit-widget' is the hook class the markedit javascript binds to.
        attrs['class'] = (attrs.get('class', '') + ' markedit-widget').strip()
        return super(MarkEditWidget, self).render(name, value, attrs)
class AdminMarkEdit(admin_widgets.AdminTextareaWidget, MarkEditWidget):
    # MarkEdit behavior combined with the admin textarea styling; the MRO
    # provides everything, so no body is needed.
    pass
class PseudoRadioWidget(forms.TextInput):
    def render(self, name, value, attrs=None):
        # NOTE(review): render() returns None, so this widget produces no
        # output.  Django widgets normally return markup -- confirm whether
        # suppressing the input entirely is intentional here.
        pass
class PseudoRadioRenderer(forms.widgets.RadioFieldRenderer):
    """Renders a RadioSelect as styled "pseudo radio" <div> elements plus a
    hidden input carrying the currently selected value."""

    def render(self):
        option_html = '<div class="%(class)s" data-value="%(value)s"><span>%(label)s</span></div>'
        rendered = []
        for option in self:
            css = 'pseudo-radio'
            if option.is_checked():
                css += ' checked'
            rendered.append(option_html % {
                'class': css,
                'value': option.choice_value,
                'label': option.choice_label,
            })
        wrapper = '<div class="pseudo-radio-field"><input type="hidden" name="%s" value="%s" />%s</div>'
        return mark_safe(wrapper % (self.name, self.value, ''.join(rendered)))
class TalkBaseForm(forms.Form):
    """Canonical talk field definitions; SubmissionForm and TalkForm pick
    individual fields from ``TalkBaseForm.base_fields`` rather than
    subclassing this form."""

    # Talk details
    title = forms.CharField(
        label=_('Title'),
        max_length=80,
        widget=forms.TextInput(attrs={'size': 40}),
        help_text=_('A descriptive, concise title with max 80 chars, e.g. "Big Data Visualization in the Browser with Bokeh"'))
    sub_title = forms.CharField(
        label=_('Subtitle'),
        help_text=_('Juice up your title with max. 100 chars, e.g. "Interactively visualize big data with high performance."'),
        max_length=100,
        widget=forms.TextInput(attrs={'size': 40}),
        required=False)
    abstract = forms.CharField(
        max_length=1500,
        label=_('Abstract (longer version)'),
        help_text=_('<p>Description of the session proposal you are submitting. Be sure to include the goals and any prerequisite required to fully understand it. See the section <em>Submitting Your Talk, Trainings, Helpdesk or Poster</em> of the CFP for further details.</p><p>Suggested size: 1500 chars.</p>'),
        widget=MarkEditWidget)
    abstract_short = forms.CharField(
        max_length=500,
        label=_('Abstract (short version)'),
        help_text=_('<p>Please enter a short version of your abstract. We need a short description e.g. for YouTube and other distribution channels with limited space for abstracts.</p><p>Suggested size: <500 chars.</p>'),
        widget=MarkEditWidget)
    prerequisites = forms.CharField(
        label=_('Prerequisites for attending the session'),
        help_text=_('What should attendees be familiar with already, important for intermediate and advanced talks.<br />E.g. data visualization basics, data analysis'),
        max_length=150,
        widget=forms.TextInput(attrs={'size': 40}),
        required=False)
    language = forms.TypedChoiceField(
        help_text=_('Select a non-English language only if you are not comfortable in speaking English.'),
        choices=settings.TALK_SUBMISSION_LANGUAGES,
        initial='en',)
    level = forms.TypedChoiceField(
        label=_('Audience level'),
        help_text=_('Please choose a level suitable for the session. People attending the session will expect their skill level to be expected, so a talk for advanced Python users should have advanced level content.'),
        choices=models.TALK_LEVEL,
        initial='beginner')

    # Talk tags
    tags = TagField(
        help_text=_('<p>Please add up to five (5) tags from the shown categories which are relevant to your session proposal. You can also suggest a track (see ">>> Suggested Track" category) for your talk. Only 5 tags will be saved; additional tags are discarded.</p>'),
        widget=TagWidget)

    # Details for talk review
    abstract_extra = forms.CharField(
        label=_('Additional information for talk reviewers'),
        help_text=_('<p>Please add anything you may find useful for the review of your session proposal, e.g. references of where you have held talks, blogs, YouTube channels, books you have written, etc. This information will only be shown for talk review purposes.</p>'),
        widget=MarkEditWidget,
        required=False)
# This form is used for new talk submissions and only when the speaker
# has not yet submitted another talk; see TalkForm for talk
# editing and additional talks.
class SubmissionForm(forms.Form):
    """
    Form for the submission of a speaker's first paper; includes fields
    that will populate both the speaker's profile and the talk data.
    Only the essential data is requested.

    (Translated from the original Italian docstring.)
    """
    # Speaker details
    first_name = forms.CharField(
        label=_('First name'),
        max_length=30)
    last_name = forms.CharField(
        label=_('Last name'),
        max_length=30)
    birthday = forms.DateField(
        label=_('Date of birth'),
        help_text=_('Format: YYYY-MM-DD<br />This date will <strong>never</strong> be published.'),
        input_formats=('%Y-%m-%d',),
        widget=forms.DateInput(attrs={'size': 10, 'maxlength': 10}))
    job_title = forms.CharField(
        label=_('Job title'),
        help_text=_('eg: student, developer, CTO, js ninja, BDFL'),
        max_length=50,
        required=False,)
    phone = forms.CharField(
        help_text=_('We require a mobile number for all speakers for important last minutes contacts.<br />Use the international format, eg: +39-055-123456.<br />This number will <strong>never</strong> be published.'),
        max_length=30)
    company = forms.CharField(
        label=_('Your company'),
        max_length=50,
        required=False)
    company_homepage = forms.URLField(
        label=_('Company homepage'),
        required=False)
    bio = forms.CharField(
        label=_('Compact biography'),
        help_text=_('Please enter a short biography (one or two paragraphs) <br />Do not paste your CV!'),
        widget=forms.Textarea())

    '''
    #Talk details OLD VERSION (FOR PYSS)
    title = forms.CharField(label=_('Talk title'), max_length=100, widget=forms.TextInput(attrs={'size': 40}))
    type = forms.TypedChoiceField(
        label=_('Talk Type'),
        help_text=_('Talk Type description'),
        choices=models.TALK_TYPE,
        initial='s',
        required=True, )
    #duration = forms.TypedChoiceField(
    #    label=_('Suggested duration'),
    #    help_text=_('This is the <b>net duration</b> of the talk, excluding Q&A'),
    #    choices=models.TALK_DURATION,
    #    coerce=int,
    #    initial='30', )
    qa_duration = forms.IntegerField(
        label=_('Q&A duration'),
        initial='0',
        required=False, )
    language = forms.TypedChoiceField(
        help_text=_('Select Spanish or Basque only if you are not comfortable in speaking English.'),
        choices=models.TALK_LANGUAGES,
        initial='en', )
    level = forms.TypedChoiceField(label=_('Audience level'), choices=models.TALK_LEVEL, initial='beginner')
    abstract = forms.CharField(
        max_length=5000,
        label=_('Talk abstract'),
        help_text=_(
            '<p>Please enter a short description of the talk you are submitting. Be sure to includes the goals of your talk and any prerequisite required to fully understand it.</p><p>Suggested size: two or three paragraphs.</p>'),
        widget=forms.Textarea(), )
    tags = TagField(widget=TagWidget)
    '''

    # Talk details NEW VERSION (FOR EP)
    # NOTE(review): the fields bound to "" are disabled form fields; bound
    # strings are not Field instances, so they never appear in cleaned_data.
    title = TalkBaseForm.base_fields['title']
    sub_title = "" #TalkBaseForm.base_fields['sub_title']
    abstract = TalkBaseForm.base_fields['abstract']
    abstract_short = "" #TalkBaseForm.base_fields['abstract_short']
    prerequisites = "" #TalkBaseForm.base_fields['prerequisites']
    language = TalkBaseForm.base_fields['language']
    level = TalkBaseForm.base_fields['level']
    tags = TalkBaseForm.base_fields['tags']
    abstract_extra = "" #TalkBaseForm.base_fields['abstract_extra']

    def __init__(self, user, *args, **kwargs):
        # Prefill the form with data from the user's AttendeeProfile, if any;
        # explicit `initial` kwargs still win over profile data.
        try:
            profile = user.attendeeprofile
        except models.AttendeeProfile.DoesNotExist:
            profile = None
        data = {
            'first_name': user.first_name,
            'last_name': user.last_name,
        }
        if profile:
            if profile.birthday is None:
                birthday_value = None
            else:
                birthday_value = profile.birthday.strftime('%Y-%m-%d')
            data.update({
                'phone': profile.phone,
                'birthday': birthday_value,
                'job_title': profile.job_title,
                'company': profile.company,
                'company_homepage': profile.company_homepage,
                'bio': getattr(profile.getBio(), 'body', ''),
            })
        data.update(kwargs.get('initial', {}))
        kwargs['initial'] = data
        super(SubmissionForm, self).__init__(*args, **kwargs)
        self.user = user

    #@transaction.commit_on_success
    def save(self):
        """Persist profile changes, create the Speaker if missing, create the
        proposed Talk and notify listeners; returns the new Talk."""
        data = self.cleaned_data

        user = self.user
        user.first_name = data['first_name'].strip()
        user.last_name = data['last_name'].strip()
        user.save()

        profile = models.AttendeeProfile.objects.getOrCreateForUser(user)
        profile.phone = data['phone']
        profile.birthday = data['birthday']
        profile.job_title = data['job_title']
        profile.company = data['company']
        profile.company_homepage = data['company_homepage']
        profile.save()
        profile.setBio(data['bio'])

        try:
            speaker = user.speaker
        except models.Speaker.DoesNotExist:
            speaker = models.Speaker(user=user)
            speaker.save()
        talk = models.Talk.objects.createFromTitle(
            title=data['title'],
            sub_title="", #data['sub_title'],
            prerequisites="",#data['prerequisites'],
            abstract_short="",#data['abstract_short'],
            abstract_extra="",#data['abstract_extra'],
            conference=settings.CONFERENCE,
            speaker=speaker,
            status='proposed',
            language=data['language'],
            level=data['level'],
            # NOTE(review): no 'type' field is declared on this form, so
            # cleaned_data['type'] looks like it would raise KeyError -- confirm.
            type=data['type']
        )

        talk.save()
        talk.setAbstract(data['abstract'])

        tags = ', '.join(data['tags'])
        log.debug(u'updating form, tags: {}'.format(tags))

        if 'tags' in data:
            # Keep only tags known to the DB, capped at five.
            valid_tags = validate_tags(data['tags'])

            talk.tags.set(*(valid_tags))

        from conference.listeners import new_paper_submission
        new_paper_submission.send(sender=speaker, talk=talk)
        return talk
class SpeakerForm(forms.Form):
    """Public speaker profile data (job, company, bio, presentation blurb)."""
    activity = forms.CharField(
        label=_('Job title'),
        help_text=_('eg: student, developer, CTO, js ninja, BDFL'),
        max_length=50,
        required=False,)
    activity_homepage = forms.URLField(label=_('Personal homepage'), required=False)
    company = forms.CharField(label=_('Your company'), max_length=50, required=False)
    company_homepage = forms.URLField(label=_('Company homepage'), required=False)
    industry = forms.CharField(max_length=50, required=False)
    bio = forms.CharField(
        label=_('Compact biography'),
        help_text=_('Please enter a short biography (one or two paragraphs). Do not paste your CV!'),
        widget=forms.Textarea(),)
    ad_hoc_description = forms.CharField(label=_('Presentation'), required=False)
# Module-level temporary grabbing the Talk 'abstracts' model field; it is
# deleted again with ``del _abstract`` below and appears otherwise unused
# here -- presumably kept for field metadata during class construction.
_abstract = models.Talk._meta.get_field_by_name('abstracts')[0]
# This form is used in case the speaker has already proposed a talk
# and for editing talks
class TalkForm(forms.ModelForm):
    # Talk details
    title = TalkBaseForm.base_fields['title']
    sub_title = TalkBaseForm.base_fields['sub_title']
    abstract = TalkBaseForm.base_fields['abstract']
    abstract_short = TalkBaseForm.base_fields['abstract_short']
    prerequisites = TalkBaseForm.base_fields['prerequisites']
    language = TalkBaseForm.base_fields['language']
    level = TalkBaseForm.base_fields['level']
    tags = TalkBaseForm.base_fields['tags']
    abstract_extra = TalkBaseForm.base_fields['abstract_extra']

    class Meta:
        model = models.Talk
        fields = ('title', 'sub_title','prerequisites', 'abstract_short', 'abstract_extra', 'type', 'language', 'level', 'slides', 'teaser_video', 'tags')
        widgets = {
            'tags': TagWidget,
        }

    def __init__(self, *args, **kw):
        # Seed the non-model 'abstract' field from the instance's stored
        # abstract body; explicit `initial` values still win.
        if kw.get('instance'):
            o = kw['instance']
            initial = kw.get('initial', {})
            data = {}
            abstract = o.getAbstract()
            if abstract:
                data['abstract'] = abstract.body
            data.update(initial)
            kw['initial'] = data
        super(TalkForm, self).__init__(*args, **kw)

    def save(self, commit=True, speaker=None):
        """Create (when the instance is unsaved) or update a talk.

        `speaker` is required only when creating a brand-new talk.
        """
        assert commit, "commit==False not supported yet"
        data = self.cleaned_data
        pk = self.instance.pk
        if not pk:
            # New talk: a speaker must be supplied by the caller.
            assert speaker is not None
            self.instance = models.Talk.objects.createFromTitle(
                title=data['title'], sub_title=data['sub_title'], prerequisites=data['prerequisites'],
                abstract_short=data['abstract_short'], abstract_extra=data['abstract_extra'],conference=settings.CONFERENCE, speaker=speaker,
                status='proposed', language=data['language'],
                level=data['level'], type=data['type']
            )
        talk = super(TalkForm, self).save(commit=commit)
        talk.setAbstract(data['abstract'])

        tags = u', '.join(data['tags'])
        log.debug(u'updating form, tags: {}'.format(tags))

        if 'tags' in data:
            # Keep only tags known to the DB, capped at five.
            valid_tags = validate_tags(data['tags'])

            talk.tags.set(*(valid_tags))

        if not pk:
            # Notify listeners only for brand-new submissions.
            from conference.listeners import new_paper_submission
            new_paper_submission.send(sender=speaker, talk=self.instance)
        return talk
# Drop the module-level temporary so it does not leak from this module.
del _abstract
from tagging.models import TaggedItem
from tagging.utils import parse_tag_input
class TrackForm(forms.ModelForm):
    """ModelForm for Track; 'schedule' and 'track' are set by the caller."""
    class Meta:
        model = models.Track
        exclude = ('schedule', 'track',)
class EventForm(forms.ModelForm):
    """ModelForm for schedule events; an event must reference either a talk
    or carry a custom text."""
    event_tracks = forms.ModelMultipleChoiceField(queryset=models.Track.objects.all())

    class Meta:
        model = models.Event
        exclude = ('schedule', 'tracks')

    def __init__(self, *args, **kwargs):
        super(EventForm, self).__init__(*args, **kwargs)
        # For an existing event, narrow the talk/track choices to the
        # event's own conference.
        if self.instance.id:
            self.fields['talk'].queryset = models.Talk.objects\
                .filter(conference=self.instance.schedule.conference)
            self.fields['event_tracks'].queryset = models.Track.objects\
                .filter(schedule__conference=self.instance.schedule.conference)

    def clean(self):
        data = super(EventForm, self).clean()
        # Bug fix: use .get() -- when 'talk' or 'custom' failed field-level
        # validation they are absent from cleaned_data and data['...'] would
        # raise KeyError instead of reporting a proper form error.
        if not data.get('talk') and not data.get('custom'):
            raise forms.ValidationError('set the talk or the custom text')
        return data
class ProfileForm(forms.ModelForm):
    """Attendee profile editor; 'bio' lives outside the model and is stored
    via getBio()/setBio()."""
    bio = forms.CharField(
        label=_('Compact biography'),
        help_text=_('Please enter a short biography (one or two paragraphs). Do not paste your CV!'),
        widget=forms.Textarea(),
        required=False,)

    class Meta:
        model = models.AttendeeProfile
        exclude = ('user', 'slug',)

    def __init__(self, *args, **kwargs):
        # Seed the non-model 'bio' field from the profile's stored biography.
        i = kwargs.get('instance')
        if i:
            initial = kwargs.get('initial', {})
            initial['bio'] = getattr(i.getBio(), 'body', '')
            kwargs['initial'] = initial
        super(ProfileForm, self).__init__(*args, **kwargs)

    def save(self, commit=True):
        profile = super(ProfileForm, self).save(commit)
        # Persist the bio separately; it is not a model field.
        profile.setBio(self.cleaned_data.get('bio', ''))
        return profile
class EventBookingForm(forms.Form):
    """Book/unbook a seat for `user` at `event`; 'value' True means book."""
    value = forms.BooleanField(required=False)

    def __init__(self, event, user, *args, **kwargs):
        super(EventBookingForm, self).__init__(*args, **kwargs)
        self.event = event
        self.user = user

    def clean_value(self):
        data = self.cleaned_data.get('value', False)
        # A booking request is only valid while seats remain available.
        if data and not models.EventBooking.objects.booking_available(self.event, self.user):
            raise forms.ValidationError('sold out')
        return data
class AdminSendMailForm(forms.Form):
    """
    Form used by the admin on the ticket statistics page; allows sending
    an email to a group of users.  (Translated from the Italian original.)
    """
    from_ = forms.EmailField(max_length=50, initial=dsettings.DEFAULT_FROM_EMAIL)
    subject = forms.CharField(max_length=200)
    body = forms.CharField(widget=forms.Textarea)
    send_email = forms.BooleanField(required=False)

    def __init__(self, *args, **kw):
        # With real_usage=True (the default) the send_email checkbox becomes
        # mandatory, acting as a confirmation before a real mass mailing.
        real = kw.pop('real_usage', True)
        super(AdminSendMailForm, self).__init__(*args, **kw)
        if real:
            self.fields['send_email'].required = True

    def load_emails(self):
        """Load previously sent messages from the on-disk log, newest first."""
        if not settings.ADMIN_TICKETS_STATS_EMAIL_LOG:
            return []
        try:
            f = file(settings.ADMIN_TICKETS_STATS_EMAIL_LOG)
        except:
            # Missing/unreadable log: behave as if it were empty.
            return []
        output = []
        while True:
            try:
                # Each record is three repr()'d lines (see save_email)
                # followed by a separator line; eval() undoes the repr().
                # NOTE(review): eval() on file contents is unsafe if the log
                # file can be written by anything but this application.
                msg = {
                    'from_': eval(f.readline()).strip(),
                    'subject': eval(f.readline()).strip(),
                    'body': eval(f.readline()).strip(),
                }
            except:
                break
            # Skip the separator line between records.
            f.readline()
            if msg['from_']:
                output.append(msg)
            else:
                break
        return reversed(output)

    def save_email(self):
        """Append the current (cleaned) message to the on-disk log."""
        if not settings.ADMIN_TICKETS_STATS_EMAIL_LOG:
            return False
        data = self.cleaned_data
        with file(settings.ADMIN_TICKETS_STATS_EMAIL_LOG, 'a') as f:
            f.write('%s\n' % repr(data['from_']))
            f.write('%s\n' % repr(data['subject']))
            f.write('%s\n' % repr(data['body']))
            f.write('------------------------------------------\n')
        return True

    def preview(self, *uids):
        """Render subject/body as Django templates for each user id; returns
        a list of (subject, body, user) tuples."""
        from django.template import Template, Context
        from django.contrib.auth.models import User

        data = self.cleaned_data

        # Optionally prepend {% load ... %} so the templates can use the
        # configured tag libraries.
        if settings.ADMIN_TICKETS_STATS_EMAIL_LOAD_LIBRARY:
            libs = '{%% load %s %%}' % ' '.join(settings.ADMIN_TICKETS_STATS_EMAIL_LOAD_LIBRARY)
        else:
            libs = ''
        tSubject = Template(libs + data['subject'])
        tBody = Template(libs + data['body'])

        conf = models.Conference.objects.current()
        output = []
        for u in User.objects.filter(id__in=uids):
            ctx = Context({
                'user': u,
                'conf': conf,
                'tickets': p3utils.get_tickets_assigned_to(u),
            })
            output.append((
                tSubject.render(ctx),
                tBody.render(ctx),
                u,
            ))
        return output

    def send_emails(self, uids, feedback_address):
        """Render and queue the mass mailing in a background process; returns
        the number of messages queued."""
        messages = []
        addresses = []
        data = self.cleaned_data

        # Make sure we don't send duplicate emails to the same uid
        uids = list(set(uids))

        # Prepare emails
        for sbj, body, user in self.preview(*uids):
            messages.append((sbj, body, data['from_'], [user.email]))
            addresses.append('"%s %s" - %s' % (user.first_name, user.last_name, user.email))

        # Mass send the emails (in a separate process)
        import multiprocessing
        process = multiprocessing.Process(
            target=mass_mail,
            args=(messages, data, addresses, feedback_address))
        process.daemon=True
        process.start()
        # Let it run until completion without joining it again
        process = None
        return len(messages)
class AttendeeLinkDescriptionForm(forms.Form):
    """Free-text note an attendee attaches to a personal contact link."""
    # Bug fix: corrected the typo "persone" -> "person" in the user-facing label.
    message = forms.CharField(
        label='A note to yourself (when you met this person, why you want to stay in touch)',
        widget=forms.Textarea)
# -- Custom Option Form used for Talk Voting Filters
class OptionForm(forms.Form):
    """Filter/sort options for the talk-voting page; every field is optional
    and rendered as pseudo-radio buttons."""
    abstracts = forms.ChoiceField(
        choices=(('not-voted', 'To be voted'), ('all', 'All'),),
        required=False,
        initial='not-voted',
        widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
    )
    talk_type = forms.ChoiceField(
        choices=settings.TALK_TYPES_TO_BE_VOTED,
        required=False,
        initial='all',
        widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
    )
    language = forms.ChoiceField(
        # 'all' plus every configured submission language.
        choices=(('all', 'All'),) + tuple(settings.TALK_SUBMISSION_LANGUAGES),
        required=False,
        initial='all',
        widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
    )
    order = forms.ChoiceField(
        choices=(('vote', 'Vote'), ('speaker', 'Speaker name'),),
        required=False,
        initial='vote',
        widget=forms.RadioSelect(renderer=PseudoRadioRenderer),
    )
    tags = TagField(
        required=False,
        widget=ReadonlyTagWidget(),
    )
| bsd-2-clause |
USGS-EROS/lcmap-firebird | test/test_pyccd.py | 1 | 6748 | from ccdc import cassandra
from ccdc import ids
from ccdc import pyccd
from cytoolz import first
from cytoolz import get
#from .shared import ccd_schema_names
#from .shared import ccd_format_keys
#from .shared import faux_dataframe
#from .shared import mock_cassandra_read
#from .shared import timeseries_element
from pyspark.sql.types import StructType
from pyspark.rdd import PipelinedRDD
import datetime
import pyspark.sql as spark_sql
import test
def test_algorithm():
    """The algorithm identifier must name the lcmap-pyccd implementation."""
    assert "lcmap-pyccd" in pyccd.algorithm()
def test_table():
    """pyccd results are persisted to the Cassandra table named 'data'."""
    assert "data" == pyccd.table()
def test_schema():
    """The pyccd schema is a Spark StructType with the expected column names."""
    assert type(pyccd.schema()) is StructType
    assert set(pyccd.schema().names) == set(test.ccd_schema_names)
def test_dataframe(spark_context, timeseries_rdd):
    """A dataframe built from the pyccd rdd exposes the full CCD schema."""
    rdd = pyccd.rdd(ctx=spark_context, timeseries=timeseries_rdd)
    dframe = pyccd.dataframe(spark_context, rdd)
    assert set(dframe.columns) == set(test.ccd_schema_names)
def test_default():
    """An empty model list yields one sentinel segment; non-empty input
    passes through unchanged."""
    assert pyccd.default([]) == [{'start_day': 1, 'end_day': 1, 'break_day': 1}]
    assert pyccd.default(["foo", "bar"]) == ["foo", "bar"]
def test_format():
    """pyccd.format must translate a pyccd change model into the flat
    IWDS record layout (one short-prefixed key per band/statistic)."""
    chipx, chipy = 100, -100
    pixelx, pixely = 50, -50
    acquired = '1980/2017'
    sday, bday, eday = 1, 2, 3
    sdate, bdate, edate = [datetime.date.fromordinal(day).isoformat()
                           for day in (sday, bday, eday)]
    ordinal_dates = [sday, bday, eday]
    iwds_dates = [sdate, bdate, edate]
    fval = 0.5

    # One spectral model shared by every band.
    spectral_model = {'magnitude': fval,
                      'rmse': fval,
                      'coefficients': (fval, fval),
                      'intercept': fval}

    pyccd_change_model = {'start_day': sday,
                          'end_day': eday,
                          'break_day': bday,
                          'observation_count': 3,
                          'change_probability': fval,
                          'curve_qa': fval}
    for band in ('blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'thermal'):
        pyccd_change_model[band] = spectral_model

    # Expected IWDS record: fixed fields plus four statistics per band,
    # keyed by the two-character band prefix.
    iwds_change_model = {'cx': chipx,
                         'cy': chipy,
                         'px': pixelx,
                         'py': pixely,
                         'sday': sdate,
                         'eday': edate,
                         'bday': bdate,
                         'chprob': get('change_probability', pyccd_change_model),
                         'curqa': get('curve_qa', pyccd_change_model),
                         'dates': iwds_dates,
                         'mask': [0, 1, 0]}
    for prefix in ('bl', 'gr', 're', 'ni', 's1', 's2', 'th'):
        iwds_change_model[prefix + 'mag'] = fval
        iwds_change_model[prefix + 'rmse'] = fval
        iwds_change_model[prefix + 'coef'] = (fval, fval)
        iwds_change_model[prefix + 'int'] = fval

    pyccd_format = pyccd.format(cx=chipx,
                                cy=chipy,
                                px=pixelx,
                                py=pixely,
                                dates=ordinal_dates,
                                ccdresult={'processing_mask': [0, 1, 0],
                                           'change_models': [pyccd_change_model,]})
    print("pyccd_format:{}".format(pyccd_format))
    print("========================================")
    print("\n")
    print("iwds_change_model:{}".format(iwds_change_model))
    assert first(pyccd_format) == iwds_change_model
def test_detect():
    """Running detection on the shared fixture yields a formatted record."""
    result = pyccd.detect(test.timeseries_element)[0]
    # -1815585 is the chip x coordinate baked into test.timeseries_element
    assert result['cx'] == -1815585
    assert set(result.keys()) == set(test.ccd_format_keys)
def test_rdd(spark_context, timeseries_rdd):
    """pyccd.rdd should produce a lazily-evaluated PipelinedRDD."""
    # calling collect, or any other method to realize the results fails
    # unless we monkeypatch the function which actually retrieves chip data.
    # not sure yet if thats beyond scope for this test or not.
    rdd = pyccd.rdd(ctx=spark_context, timeseries=timeseries_rdd)
    assert type(rdd) is PipelinedRDD
#def test_read_write(spark_context, sql_context):
# # create a dataframe from an rdd
# rdd = spark_context.parallelize([(100, -100, 200, -200, 33, 44),
# (300, -300, 400, -400, 55, 66)])
# layers = rdd.map(lambda x: spark_sql.Row(chipx=x[0], chipy=x[1], pixelx=x[2], pixely=x[3], sday=x[4], eday=x[5]))
# context = spark_sql.SQLContext(spark_context)
# dataframe = context.createDataFrame(layers)
#
# # test write
# written_dataframe = pyccd.write(spark_context, dataframe)
# assert type(written_dataframe) is spark_sql.dataframe.DataFrame
#
# # test read
# ids_rdd = rdd.map(lambda x: spark_sql.Row(chipx=x[0], chipy=x[1]))
# ids_df = ids.dataframe(spark_context, ids_rdd, ids.chip_schema())
# read_dataframe = pyccd.read(spark_context, ids_df)
# assert type(read_dataframe) is spark_sql.dataframe.DataFrame
# assert set([i.asDict()["chipx"] for i in read_dataframe.collect()]) == set([100, 300])
#def test_join(sql_context):
# df_attrs1 = ['cx', 'cy', 'px', 'py', 'sday', 'eday', 'rfrawp']
# df_attrs2 = ['cx', 'cy', 'px', 'py', 'sday', 'eday', 'srb3']
# ccd_df = faux_dataframe(ctx=sql_context, attrs=df_attrs1)
# pred_df = faux_dataframe(ctx=sql_context, attrs=df_attrs2)
# joined_df = pyccd.join(ccd=ccd_df, predictions=pred_df)
# assert set(['cx', 'cy', 'px', 'py', 'sday', 'eday', 'srb3']) == set(joined_df.schema.names)
| unlicense |
sencha/chromium-spacewalk | tools/perf/measurements/memory_pressure.py | 7 | 1856 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Multi tab memory test.
This test is a multi tab test, but we're interested in measurements for
the entire test rather than each single page.
"""
import logging
from metrics import histogram_util
from telemetry.page import page_test
class MemoryPressure(page_test.PageTest):
    """Opens each page in a fresh tab until memory pressure causes a tab
    discard or a renderer kill, then requests test exit."""

    def __init__(self, *args, **kwargs):
        super(MemoryPressure, self).__init__(*args, **kwargs)
        # _first_tab is used to access histograms
        self._is_first_page = True
        # Number of pages visited so far (for the exit log line).
        self._tab_count = 0

    # Allow histogram collection
    def CustomizeBrowserOptions(self, options):
        histogram_util.CustomizeBrowserOptions(options)

    # Open a new tab at each page
    def TabForPage(self, page, browser):
        return browser.tabs.New()

    def GetTabsHistogramCounts(self, tab):
        """Return (discard_count, kill_count) read from browser histograms."""
        histogram_type = histogram_util.BROWSER_HISTOGRAM
        discard_count = histogram_util.GetHistogramCount(
            histogram_type, "Tabs.Discard.DiscardCount", tab)
        kill_count = histogram_util.GetHistogramCount(
            histogram_type, "Tabs.SadTab.KillCreated", tab)
        return (discard_count, kill_count)

    def ValidateAndMeasurePage(self, page, tab, results):
        """Check after each navigation whether a discard/kill occurred."""
        # After navigating to each page, check if it triggered tab discards or
        # kills.
        (discard_count, kill_count) = self.GetTabsHistogramCounts(tab)

        # Sanity check for first page: nothing should have died yet.
        if self._is_first_page:
            self._is_first_page = False
            assert discard_count == 0 and kill_count == 0

        self._tab_count += 1

        # End the test at the first kill or discard.
        if kill_count > 0 or discard_count > 0:
            logging.info("test ending at tab %d, discards = %d, kills = %d" %
                         (self._tab_count, discard_count, kill_count))
            self.RequestExit()
| bsd-3-clause |
2014c2g14/2014c2 | wsgi/static/Brython2.1.0-20140419-113919/Lib/errno.py | 113 | 2776 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
# Numeric errno value -> symbolic name (Linux values).
errorcode = {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}

# Publish every symbol (e.g. ENOENT = 2) as a module-level constant.
# The original built 'NAME=NUM' statements and passed them to eval(),
# but CPython's eval() only accepts expressions, so that raises a
# SyntaxError (it only worked under Brython's permissive eval).
# Updating the module namespace directly is correct everywhere.
globals().update((_code, _num) for _num, _code in errorcode.items())
| gpl-2.0 |
wangming28/syzygy | third_party/virtualenv/files/virtualenv_support/distribute_setup.py | 32 | 15757 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
    from site import USER_SITE
except ImportError:
    # Very old Pythons have no per-user site directory.
    USER_SITE = None

try:
    import subprocess

    def _python_cmd(*args):
        # Run the current interpreter with *args*; True iff exit status 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0

except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        # Fallback using os.spawnl; same contract as the subprocess variant.
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Default distribute release to fetch and the PyPI download mirror.
DEFAULT_VERSION = "0.6.15"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake setuptools metadata written below.
SETUPTOOLS_FAKED_VERSION = "0.6c11"

# Minimal PKG-INFO used to masquerade a fake setuptools installation.
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack *tarball* into a temp dir and run ``setup.py install`` there."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build a distribute egg into *to_dir*.

    Raises IOError if the expected egg file was not produced.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory (the tarball contains a single top dir)
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)

    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Ensure a distribute egg for *version* exists in *to_dir*, then make
    it importable and mark it as the bootstrap source."""
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # Put the egg first on sys.path so this import resolves to it.
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure distribute >= *version* is importable, downloading if needed.

    If plain setuptools (not distribute) is installed, optionally fake it
    out of the way (``no_fake=False``) before bootstrapping distribute.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # distribute's pkg_resources carries a _distribute marker;
            # its absence means plain setuptools is installed.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap implementations in-process; bail out.
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 moved urlopen; fall back to the Python 2 location.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator: run *function* with setuptools' DirectorySandbox disabled.

    The sandbox blocks file-system writes during ``setup.py`` runs; the
    patched ``_violation`` is a no-op for the duration of the call and is
    restored afterwards.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                # Already patched by an outer call; don't double-patch.
                patched = False
        except ImportError:
            patched = False

        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old

    return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it"""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Move the original aside (timestamped .OLD backup), then rewrite.
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

# File writes must escape the setuptools sandbox.
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
    """Move *path* aside to a timestamped ``.OLD`` backup; return the new name."""
    backup = '%s.OLD.%s' % (path, time.time())
    log.warn('Renaming %s into %s', path, backup)
    os.rename(path, backup)
    return backup
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools install living in *placeholder*.

    Patches its egg-info to look like the faked setuptools and renames the
    actual modules out of the way. Returns True on success, False if there
    was nothing to do.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return

    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    # egg-info may be a directory (newer layout) or a single file.
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)

    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True

# File renames must escape the setuptools sandbox.
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: drop fake setuptools metadata next to the install."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake setuptools egg-info file and a .pth entry into
    *placeholder* so tools believe setuptools is installed."""
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
            (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return

    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()

    # A .pth entry pointing at the fake egg-info.
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()

# File writes must escape the setuptools sandbox.
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a stub carrying the fake
    setuptools PKG-INFO. Returns False if it was already patched."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # Move the real egg aside, then build the stub in its place.
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True

# Directory creation must escape the setuptools sandbox.
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: fake out any existing setuptools install."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """Detect an installed (non-distribute) setuptools and patch it into a
    fake so distribute can take its place; relaunch afterwards."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))

    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)

    # if --root or --preix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return

    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-execute the current setup invocation and exit with its status."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    (Backport of TarFile.extractall for Pythons whose tarfile lacks it;
    *self* is the TarFile instance.)
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before parents.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # NOTE: argv and version are accepted but unused here —
    # download_setuptools() runs with its module defaults.
    tarball = download_setuptools()
    _install(tarball)


if __name__ == '__main__':
    main(sys.argv[1:])
| apache-2.0 |
Dazzozo/huawei-kernel-3.4 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Python 2 script: require exactly one argument (the ELF file to check).
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches "<func>: [0xstart-0xend]" headers in `readelf -u` output.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches the per-region "rlen=N" annotations.
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Flag a mismatch between a function's instruction-slot count and the
    # sum of its unwind region lengths; bumps the global error counter.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # Fall back to the [start-end] address range when unnamed
        # (start/end are module globals set by the parsing loop below).
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Parse `readelf -u` output: each function header flushes the previous
# function's tally, then rlen lines accumulate region lengths.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        check_func(func, slots, rlen_sum)

        func  = m.group(1)
        start = long(m.group(2), 16)
        end   = long(m.group(3), 16)
        # IA-64: 3 instruction slots per 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
glneo/gnuradio | gr-qtgui/apps/plot_psd_base.py | 47 | 5350 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os, sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
try:
import scipy
except ImportError:
print "Error: Scipy required (www.scipy.org)."
sys.exit(1)
try:
from gnuradio.qtgui.plot_form import *
from gnuradio.qtgui.plot_base import *
except ImportError:
from plot_form import *
from plot_base import *
class plot_base(gr.top_block):
    """GNU Radio top block that plots the PSD of one or more sample files.

    Subclasses are expected to provide ``self.dsize`` (item size),
    ``self.src_type`` (source block factory), ``self.read_samples`` and
    ``self.gui_snk`` — TODO confirm against the concrete plot apps.
    """

    def __init__(self, filelist, fc, samp_rate, psdsize, start,
                 nsamples, max_nsamples, avg=1.0):
        gr.top_block.__init__(self)

        self._filelist = filelist
        self._center_freq = fc
        self._samp_rate = samp_rate
        self._psd_size = psdsize
        self._start = start
        self._max_nsamps = max_nsamples
        self._nsigs = len(self._filelist)
        self._avg = avg
        self._nsamps = nsamples

        # Display defaults for the y axis.
        self._auto_scale = False
        self._y_min = -200
        self._y_max = 400
        self._y_range = 130
        self._y_value = 10

        self._is_setup = False

        self.qapp = QtGui.QApplication(sys.argv)

    def setup(self):
        """Read every file, build the sources and wire up the flowgraph."""
        self.skip = blocks.skiphead(self.dsize, self._start)

        n = 0
        self.srcs = list()
        # Track global min/max over all files (Python 2: sys.maxint).
        self._data_min = sys.maxint
        self._data_max = -sys.maxint - 1
        for f in self._filelist:
            data,_min,_max = self.read_samples(f, self._start,
                                               self._nsamps, self._psd_size)
            if(_min < self._data_min):
                self._data_min = _min
            if(_max > self._data_max):
                self._data_max = _max

            self.srcs.append(self.src_type(data))

            # Set default labels based on file names
            fname = f.split("/")[-1]
            self.gui_snk.set_line_label(n, "{0}".format(fname))
            n += 1

        # First source goes through the skiphead; the rest connect directly.
        self.connect(self.srcs[0], self.skip)
        self.connect(self.skip, (self.gui_snk, 0))
        for i,s in enumerate(self.srcs[1:]):
            self.connect(s, (self.gui_snk, i+1))

        self.gui_snk.set_update_time(0)
        self.gui_snk.enable_menu(False)

        # Get Python Qt references
        pyQt = self.gui_snk.pyqwidget()
        self.pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)

        self._is_setup = True

    def is_setup(self):
        # True once setup() has completed.
        return self._is_setup

    def set_y_axis(self, y_min, y_max):
        """Apply the y-axis limits to the sink and echo them back."""
        self.gui_snk.set_y_axis(y_min, y_max)
        return y_min, y_max

    def get_gui(self):
        """Return the wrapped Qt widget, or None before setup()."""
        if(self.is_setup()):
            return self.pyWin
        else:
            return None

    def reset(self, newstart, newnsamps):
        """Stop the flowgraph, re-read all files from *newstart*, restart."""
        self.stop()
        self.wait()

        self._start = newstart

        self._data_min = sys.maxint
        self._data_max = -sys.maxint - 1
        for s,f in zip(self.srcs, self._filelist):
            data,_min,_max = self.read_samples(f, self._start, newnsamps, self._psd_size)
            if(_min < self._data_min):
                self._data_min = _min
            if(_max > self._data_max):
                self._data_max = _max

            s.set_data(data)

        self.start()
def setup_options(desc):
    """Parse the command line shared by the PSD plot apps.

    Returns (options, args); args must contain at least one input file,
    otherwise help is printed and the process exits.
    """
    parser = OptionParser(option_class=eng_option, description=desc,
                          conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=1000000,
                      help="Set the number of samples to display [default=prints entire file]")
    parser.add_option("-S", "--start", type="int", default=0,
                      help="Starting sample number [default=%default]")
    parser.add_option("-L", "--psd-size", type="int", default=2048,
                      help="Set the FFT size of the PSD [default=%default]")
    parser.add_option("-f", "--center-frequency", type="eng_float", default=0.0,
                      help="Set the center frequency of the signal [default=%default]")
    parser.add_option("-r", "--sample-rate", type="eng_float", default=1.0,
                      help="Set the sample rate of the signal [default=%default]")
    parser.add_option("-a", "--average", type="float", default=1.0,
                      help="Set amount of averaging (smaller=more averaging) [default=%default]")
    (options, args) = parser.parse_args()

    if(len(args) < 1):
        parser.print_help()
        sys.exit(0)

    return (options,args)
| gpl-3.0 |
glneo/gnuradio | gr-uhd/examples/python/usrp_wfm_rcv.py | 58 | 11053 | #!/usr/bin/env python
#
# Copyright 2005-2007,2009,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio, uhd
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import slider, powermate
from gnuradio.wxgui import stdgui2, fftsink2, form
from optparse import OptionParser
import sys
import wx
class wfm_rx_block (stdgui2.std_top_block):
def __init__(self,frame,panel,vbox,argv):
    """Build the WFM receive flowgraph: USRP source -> channel filter ->
    WFM demod -> volume control -> sound card, plus the wx GUI."""
    stdgui2.std_top_block.__init__ (self,frame,panel,vbox,argv)

    parser=OptionParser(option_class=eng_option)
    parser.add_option("-a", "--args", type="string", default="",
                      help="UHD device address args [default=%default]")
    parser.add_option("", "--spec", type="string", default=None,
                      help="Subdevice of UHD device where appropriate")
    parser.add_option("-A", "--antenna", type="string", default=None,
                      help="select Rx Antenna where appropriate")
    parser.add_option("-f", "--freq", type="eng_float", default=100.1e6,
                      help="set frequency to FREQ", metavar="FREQ")
    parser.add_option("-g", "--gain", type="eng_float", default=None,
                      help="set gain in dB (default is midpoint)")
    parser.add_option("-V", "--volume", type="eng_float", default=None,
                      help="set volume (default is midpoint)")
    parser.add_option("-O", "--audio-output", type="string", default="default",
                      help="pcm device name. E.g., hw:0,0 or surround51 or /dev/dsp")
    parser.add_option("", "--freq-min", type="eng_float", default=87.9e6,
                      help="Set a minimum frequency [default=%default]")
    parser.add_option("", "--freq-max", type="eng_float", default=108.1e6,
                      help="Set a maximum frequency [default=%default]")

    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.print_help()
        sys.exit(1)

    self.frame = frame
    self.panel = panel

    self.vol = 0
    self.state = "FREQ"
    self.freq = 0

    # FM band limits used by the tuning slider and range checks.
    self.fm_freq_min = options.freq_min
    self.fm_freq_max = options.freq_max

    # build graph
    self.u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))

    # Set the subdevice spec
    if(options.spec):
        self.u.set_subdev_spec(options.spec, 0)

    # Set the antenna
    if(options.antenna):
        self.u.set_antenna(options.antenna, 0)

    # Rates: 320 kS/s from the radio, decimated by 10 to 32 kS/s audio.
    usrp_rate = 320e3
    demod_rate = 320e3
    audio_rate = 32e3
    audio_decim = int(demod_rate / audio_rate)

    self.u.set_samp_rate(usrp_rate)
    dev_rate = self.u.get_samp_rate()

    nfilts = 32
    chan_coeffs = filter.optfir.low_pass(nfilts,           # gain
                                         nfilts*usrp_rate, # sampling rate
                                         80e3,             # passband cutoff
                                         115e3,            # stopband cutoff
                                         0.1,              # passband ripple
                                         60)               # stopband attenuation
    rrate = usrp_rate / dev_rate
    self.chan_filt = filter.pfb.arb_resampler_ccf(rrate, chan_coeffs, nfilts)

    self.guts = analog.wfm_rcv(demod_rate, audio_decim)

    self.volume_control = blocks.multiply_const_ff(self.vol)

    # sound card as final sink
    self.audio_sink = audio.sink(int (audio_rate),
                                 options.audio_output,
                                 False)  # ok_to_block

    # now wire it all together
    self.connect(self.u, self.chan_filt, self.guts,
                 self.volume_control, self.audio_sink)

    self._build_gui(vbox, usrp_rate, demod_rate, audio_rate)

    if options.gain is None:
        # if no gain was specified, use the mid-point in dB
        g = self.u.get_gain_range()
        options.gain = float(g.start()+g.stop())/2

    if options.volume is None:
        # volume_range() is defined elsewhere in this class — midpoint default
        g = self.volume_range()
        options.volume = float(g[0]+g[1])/2

    frange = self.u.get_freq_range()
    if(frange.start() > self.fm_freq_max or frange.stop() < self.fm_freq_min):
        sys.stderr.write("Radio does not support required frequency range.\n")
        sys.exit(1)
    if(options.freq < self.fm_freq_min or options.freq > self.fm_freq_max):
        sys.stderr.write("Requested frequency is outside of required frequency range.\n")
        sys.exit(1)

    # set initial values
    self.set_gain(options.gain)
    self.set_vol(options.volume)
    if not(self.set_freq(options.freq)):
        self._set_status_msg("Failed to set initial frequency")
    def _set_status_msg(self, msg, which=0):
        # Show `msg` in status-bar field `which`; field 0 is used for tuning
        # feedback (set_freq) and field 1 for the volume/state summary
        # (update_status_bar).
        self.frame.GetStatusBar().SetStatusText(msg, which)
    def _build_gui(self, vbox, usrp_rate, demod_rate, audio_rate):
        """Populate `vbox` with the receiver GUI: FFT displays on top and a
        control strip (frequency field/slider, volume and gain sliders) at the
        bottom.  Also tries to hook up a Powermate knob if one is present.
        """
        def _form_set_freq(kv):
            # Adapter: the form widget passes a key/value dict; extract 'freq'.
            return self.set_freq(kv['freq'])

        # The `if 1:` / `if 0:` guards are manual feature toggles for the
        # individual FFT displays.
        if 1:
            self.src_fft = fftsink2.fft_sink_c(self.panel, title="Data from USRP",
                                               fft_size=512, sample_rate=usrp_rate,
                                               ref_scale=32768.0, ref_level=0, y_divs=12)
            self.connect (self.u, self.src_fft)
            vbox.Add (self.src_fft.win, 4, wx.EXPAND)
        if 1:
            post_filt_fft = fftsink2.fft_sink_f(self.panel, title="Post Demod",
                                                fft_size=1024, sample_rate=usrp_rate,
                                                y_per_div=10, ref_level=0)
            self.connect (self.guts.fm_demod, post_filt_fft)
            vbox.Add (post_filt_fft.win, 4, wx.EXPAND)
        if 0:
            post_deemph_fft = fftsink2.fft_sink_f(self.panel, title="Post Deemph",
                                                  fft_size=512, sample_rate=audio_rate,
                                                  y_per_div=10, ref_level=-20)
            self.connect (self.guts.deemph, post_deemph_fft)
            vbox.Add (post_deemph_fft.win, 4, wx.EXPAND)

        # control area form at bottom
        self.myform = myform = form.form()
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['freq'] = form.float_field(
            parent=self.panel, sizer=hbox, label="Freq", weight=1,
            callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
        hbox.Add((5,0), 0)
        myform['freq_slider'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, weight=3,
                                        range=(self.fm_freq_min, self.fm_freq_max, 0.1e6),
                                        callback=self.set_freq)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['volume'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
                                        weight=3, range=self.volume_range(),
                                        callback=self.set_vol)
        hbox.Add((5,0), 1)
        g = self.u.get_gain_range()
        myform['gain'] = \
            form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Gain",
                                        weight=3, range=(g.start(), g.stop(), g.step()),
                                        callback=self.set_gain)
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

        # Optional Griffin Powermate / Contour knob support; absence is not an
        # error, so the broad except only downgrades to an informational note.
        try:
            self.knob = powermate.powermate(self.frame)
            self.rot = 0
            powermate.EVT_POWERMATE_ROTATE (self.frame, self.on_rotate)
            powermate.EVT_POWERMATE_BUTTON (self.frame, self.on_button)
        except:
            print "FYI: No Powermate or Contour Knob found"
def on_rotate (self, event):
self.rot += event.delta
if (self.state == "FREQ"):
if self.rot >= 3:
self.set_freq(self.freq + .1e6)
self.rot -= 3
elif self.rot <=-3:
self.set_freq(self.freq - .1e6)
self.rot += 3
else:
step = self.volume_range()[2]
if self.rot >= 3:
self.set_vol(self.vol + step)
self.rot -= 3
elif self.rot <=-3:
self.set_vol(self.vol - step)
self.rot += 3
def on_button (self, event):
if event.value == 0: # button up
return
self.rot = 0
if self.state == "FREQ":
self.state = "VOL"
else:
self.state = "FREQ"
self.update_status_bar ()
def set_vol (self, vol):
g = self.volume_range()
self.vol = max(g[0], min(g[1], vol))
self.volume_control.set_k(10**(self.vol/10))
self.myform['volume'].set_value(self.vol)
self.update_status_bar ()
    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.

        Args:
            target_freq: frequency in Hz

        Returns:
            bool: True if the USRP accepted the new center frequency,
            False otherwise.  On success the frequency field, slider and
            status bar are all updated to match.
        """
        r = self.u.set_center_freq(target_freq)
        if r:
            self.freq = target_freq
            self.myform['freq'].set_value(target_freq)         # update displayed value
            self.myform['freq_slider'].set_value(target_freq)  # update displayed value
            self.update_status_bar()
            self._set_status_msg("OK", 0)
            return True
        self._set_status_msg("Failed", 0)
        return False
    def set_gain(self, gain):
        # Reflect the new RF gain in the GUI slider, then program the USRP.
        self.myform['gain'].set_value(gain)     # update displayed value
        self.u.set_gain(gain)
def update_status_bar (self):
msg = "Volume:%r Setting:%s" % (self.vol, self.state)
self._set_status_msg(msg, 1)
self.src_fft.set_baseband_freq(self.freq)
    def volume_range(self):
        # (min, max, step) in dB, shared by set_vol, the volume slider and
        # the knob handler.
        return (-20.0, 0.0, 0.5)
if __name__ == '__main__':
    # Launch the wxPython GUI application wrapping the WFM receiver flowgraph.
    app = stdgui2.stdapp (wfm_rx_block, "USRP WFM RX")
    app.MainLoop ()
| gpl-3.0 |
yangqun/lily2-gem5 | src/arch/x86/isa/insts/x87/data_transfer_and_conversion/convert_and_load_or_store_bcd.py | 91 | 2157 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode for the x87 BCD load/store instructions.  FBLD and FBSTP are
# listed but not implemented — this string contains only placeholder comments.
microcode = '''
# FBLD
# FBSTP
'''
| bsd-3-clause |
speedbot/android_kernel_htc_golfu | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Exactly one argument (the object file to check) is required.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches the function header emitted by `readelf -u`, capturing the
# function name and its start/end addresses (hex).
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind region line, capturing its length in slots.
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Verify that the summed unwind-region lengths account for every
    # instruction slot of the function; report and count a mismatch.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        # `start` and `end` are module-level globals assigned by the
        # parsing loop below; they identify an unnamed function by address.
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Parse `readelf -u` output: each function header flushes the totals of the
# previous function through check_func(), then accumulates the region
# lengths (rlen) that follow it.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function: check the one we just finished, then reset counters.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 bundles pack 3 instruction slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the last function (the loop only checks on seeing the next header).
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
MISS3D/s2p | s2p/block_matching.py | 1 | 12352 | # Copyright (C) 2015, Carlo de Franchis <carlo.de-franchis@ens-cachan.fr>
# Copyright (C) 2015, Gabriele Facciolo <facciolo@cmla.ens-cachan.fr>
# Copyright (C) 2015, Enric Meinhardt <enric.meinhardt@cmla.ens-cachan.fr>
# Copyright (C) 2015, Julien Michel <julien.michel@cnes.fr>
import os
import numpy as np
from s2p import common
from s2p.config import cfg
def rectify_secondary_tile_only(algo):
    """Return True if `algo` requires rectifying the secondary tile only.

    Such algorithms (currently only 'tvl1_2d') take per-dimension disparity
    bounds, so compute_disparity_map skips wrapping the scalar bounds.

    Args:
        algo: name of the block-matching algorithm.

    Returns:
        bool: True for algorithms in the per-dimension-bounds set.
    """
    # Membership test replaces the redundant if/else returning True/False.
    return algo in ['tvl1_2d']
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
                          disp_max=None, extra_params=''):
    """
    Runs a block-matching binary on a pair of stereo-rectified images.

    Args:
        im1, im2: rectified stereo pair
        disp: path to the output diparity map
        mask: path to the output rejection mask
        algo: string used to indicate the desired binary. Currently it can be
            one among 'hirschmuller02', 'hirschmuller08',
            'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
            'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
        disp_min : smallest disparity to consider
        disp_max : biggest disparity to consider
        extra_params: optional string with algorithm-dependent parameters
    """
    # Algorithms with scalar bounds get them wrapped in one-element lists so
    # the per-dimension clamping loop below applies uniformly.
    if rectify_secondary_tile_only(algo) is False:
        disp_min = [disp_min]
        disp_max = [disp_max]

    # limit disparity bounds
    # bugfix: the original called np.alltrue(...) and discarded the result,
    # making the length check a no-op; assert the invariant explicitly.
    assert len(disp_min) == len(disp_max)
    for dim in range(len(disp_min)):
        if disp_min[dim] is not None and disp_max[dim] is not None:
            image_size = common.image_size_gdal(im1)
            # Clamp the disparity span to the image extent, keeping it
            # centered on the original interval.
            if disp_max[dim] - disp_min[dim] > image_size[dim]:
                center = 0.5 * (disp_min[dim] + disp_max[dim])
                disp_min[dim] = int(center - 0.5 * image_size[dim])
                disp_max[dim] = int(center + 0.5 * image_size[dim])

        # round disparity bounds outward to integers
        if disp_min[dim] is not None:
            disp_min[dim] = int(np.floor(disp_min[dim]))
        # bugfix: the original tested `disp_max is not None` (the whole
        # list, always true here) instead of the per-dimension entry.
        if disp_max[dim] is not None:
            disp_max[dim] = int(np.ceil(disp_max[dim]))

    # Unwrap the scalar bounds again for single-bound algorithms.
    if rectify_secondary_tile_only(algo) is False:
        disp_min = disp_min[0]
        disp_max = disp_max[0]

    # define environment variables
    env = os.environ.copy()
    env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])

    # call the block_matching binary
    if algo == 'hirschmuller02':
        bm_binary = 'subpix.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask,
                                                            disp_min, disp_max, extra_params))
        # extra_params: LoG(0) regionRadius(3)
        # LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
        # regionRadius: radius of the window

    if algo == 'hirschmuller08':
        bm_binary = 'callSGBM.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask,
                                                            disp_min, disp_max, extra_params))
        # extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
        # regionRadius: radius of the window
        # P1, P2 : regularization parameters
        # LRdiff: maximum difference between left and right disparity maps

    if algo == 'hirschmuller08_laplacian':
        bm_binary = 'callSGBM_lap.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask,
                                                            disp_min, disp_max, extra_params))

    if algo == 'hirschmuller08_cauchy':
        bm_binary = 'callSGBM_cauchy.sh'
        common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask,
                                                            disp_min, disp_max, extra_params))

    if algo == 'sgbm':
        # opencv sgbm function implements a modified version of Hirschmuller's
        # Semi-Global Matching (SGM) algorithm described in "Stereo Processing
        # by Semiglobal Matching and Mutual Information", PAMI, 2008
        p1 = 8    # penalizes disparity changes of 1 between neighbor pixels
        p2 = 32   # penalizes disparity changes of more than 1
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        win = 3   # matched block size. It must be a positive odd number
        lr = 1    # maximum difference allowed in the left-right disparity check
        cost = common.tmpfile('.tif')
        common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
                                                               disp, cost,
                                                               disp_min,
                                                               disp_max,
                                                               win, p1, p2, lr))
        # create rejection mask (0 means rejected, 1 means accepted)
        # keep only the points that are matched and present in both input images
        common.run('plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'.format(disp, im1, im2, mask))

    if algo == 'tvl1':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
                   env)

    if algo == 'tvl1_2d':
        tvl1 = 'callTVL1.sh'
        common.run('{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp, mask,
                                                    1), env)

    if algo == 'msmw':
        bm_binary = 'iip_stereo_correlation_multi_win2'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'msmw2':
        bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
        common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
            bm_binary, disp_min, disp_max, im1, im2, disp, mask), env)

    if algo == 'msmw3':
        bm_binary = 'msmw'
        common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
            bm_binary, disp_min, disp_max, im1, im2, disp, mask))

    if algo == 'mgm':
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['TSGM'] = '3'
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
        common.run('{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5} -confidence_consensusL {6}'.format('mgm',
                                                                                                            disp_min,
                                                                                                            disp_max,
                                                                                                            im1, im2,
                                                                                                            disp, conf),
                   env)
        # produce the mask: rejected pixels are marked with nan of inf in disp map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi_lsd':
        ref = im1
        sec = im2
        wref = common.tmpfile('.tif')
        wsec = common.tmpfile('.tif')
        # TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(ref)
        common.run('qauto %s | \
                   lsd - - | \
                   cut -d\' \' -f1,2,3,4 | \
                   pview segments %d %d | \
                   plambda - "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref))
        # image dependent weights based on lsd segments
        image_size = common.image_size_gdal(sec)
        common.run('qauto %s | \
                   lsd - - | \
                   cut -d\' \' -f1,2,3,4 | \
                   pview segments %d %d | \
                   plambda - "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec))

        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['SUBPIX'] = '2'
        env['MEDIAN'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']
        # increasing these numbers compensates the loss of regularity after incorporating LSD weights
        P1 = 12*regularity_multiplier   # penalizes disparity changes of 1 between neighbor pixels
        P2 = 48*regularity_multiplier   # penalizes disparity changes of more than 1
        conf = disp+'.confidence.tif'
        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census -O 8 -P1 {7} -P2 {8} -wl {3} -wr {4} -confidence_consensusL {10} {5} {6} {9}'.format('mgm_multi',
                                                                                                                                                  disp_min,
                                                                                                                                                  disp_max,
                                                                                                                                                  wref, wsec,
                                                                                                                                                  im1, im2,
                                                                                                                                                  P1, P2,
                                                                                                                                                  disp, conf),
                   env)
        # produce the mask: rejected pixels are marked with nan of inf in disp map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'mgm_multi':
        env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
        env['MINDIFF'] = '1'
        env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
        env['SUBPIX'] = '2'
        # it is required that p2 > p1. The larger p1, p2, the smoother the disparity
        regularity_multiplier = cfg['stereo_regularity_multiplier']
        P1 = 8*regularity_multiplier    # penalizes disparity changes of 1 between neighbor pixels
        P2 = 32*regularity_multiplier   # penalizes disparity changes of more than 1
        conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
        common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census {3} {4} {5} -confidence_consensusL {6}'.format('mgm_multi',
                                                                                                            disp_min,
                                                                                                            disp_max,
                                                                                                            im1, im2,
                                                                                                            disp, conf),
                   env)
        # produce the mask: rejected pixels are marked with nan of inf in disp map
        common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))

    if algo == 'micmac':
        # add micmac binaries to the PATH environment variable
        s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
        micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
        os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin

        # prepare micmac xml params file
        micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
        work_dir = os.path.dirname(os.path.abspath(im1))
        common.run('cp {0} {1}'.format(micmac_params, work_dir))

        # run MICMAC
        common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))

        # copy output disp map
        micmac_disp = os.path.join(work_dir, 'MEC-EPI',
                                   'Px1_Num6_DeZoom1_LeChantier.tif')
        disp = os.path.join(work_dir, 'rectified_disp.tif')
        common.run('cp {0} {1}'.format(micmac_disp, disp))

        # compute mask by rejecting the 10% of pixels with lowest correlation score
        micmac_cost = os.path.join(work_dir, 'MEC-EPI',
                                   'Correl_LeChantier_Num_5.tif')
        mask = os.path.join(work_dir, 'rectified_mask.png')
        common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(micmac_cost, mask))
| agpl-3.0 |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/test/test_normalization.py | 62 | 3225 | from test.support import run_unittest, open_urlresource
import unittest
from http.client import HTTPException
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
def check_version(testfile):
    """Return True if the first line of `testfile` mentions the Unicode
    database version this interpreter was built with."""
    header = testfile.readline()
    return unidata_version in header
class RangeError(Exception):
    """Raised by unistr() for code points above sys.maxunicode."""
    pass
def NFC(s):
    """Return the NFC (canonical composition) normalization of `s`.

    The parameter was renamed from `str`, which shadowed the builtin;
    all call sites are positional so this is backward-compatible.
    """
    return normalize("NFC", s)
def NFKC(s):
    """Return the NFKC (compatibility composition) normalization of `s`.

    The parameter was renamed from `str`, which shadowed the builtin;
    all call sites are positional so this is backward-compatible.
    """
    return normalize("NFKC", s)
def NFD(s):
    """Return the NFD (canonical decomposition) normalization of `s`.

    The parameter was renamed from `str`, which shadowed the builtin;
    all call sites are positional so this is backward-compatible.
    """
    return normalize("NFD", s)
def NFKD(s):
    """Return the NFKD (compatibility decomposition) normalization of `s`.

    The parameter was renamed from `str`, which shadowed the builtin;
    all call sites are positional so this is backward-compatible.
    """
    return normalize("NFKD", s)
def unistr(data):
    """Build a string from `data`, a space-separated list of hex code points.

    Raises RangeError if any code point exceeds sys.maxunicode.
    """
    codepoints = [int(token, 16) for token in data.split(" ")]
    for cp in codepoints:
        if cp > sys.maxunicode:
            raise RangeError
    return "".join(chr(cp) for cp in codepoints)
class NormalizationTest(unittest.TestCase):
    """Drive the Unicode consortium's NormalizationTest.txt conformance data
    against unicodedata.normalize (fetched over the network; the test is
    skipped if the file cannot be retrieved)."""

    def test_main(self):
        part = None
        # Characters covered by Part 1 of the test file; everything else is
        # checked for normalization-invariance in the final loop.
        part1_data = {}
        # Hit the exception early
        try:
            testdata = open_urlresource(TESTDATAURL, encoding="utf-8",
                                        check=check_version)
        except (IOError, HTTPException):
            self.skipTest("Could not retrieve " + TESTDATAURL)
        self.addCleanup(testdata.close)
        for line in testdata:
            # Strip trailing comments and blank lines.
            if '#' in line:
                line = line.split('#')[0]
            line = line.strip()
            if not line:
                continue
            # Section headers like "@Part1" switch the current part.
            if line.startswith("@Part"):
                part = line.split()[0]
                continue
            # Each data line holds five ';'-separated code-point sequences.
            try:
                c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
            except RangeError:
                # Skip unsupported characters;
                # try atleast adding c1 if we are in part1
                if part == "@Part1":
                    try:
                        c1 = unistr(line.split(';')[0])
                    except RangeError:
                        pass
                    else:
                        part1_data[c1] = 1
                continue

            # Perform tests (the invariants specified by UAX #15).
            self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
            self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
            self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
            self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
            self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
                            NFKC(c3) == NFKC(c4) == NFKC(c5),
                            line)
            self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
                            NFKD(c3) == NFKD(c4) == NFKD(c5),
                            line)

            # Record part 1 data
            if part == "@Part1":
                part1_data[c1] = 1

        # Perform tests for all other data: any character not listed in
        # Part 1 must be invariant under all four normalization forms.
        for c in range(sys.maxunicode+1):
            X = chr(c)
            if X in part1_data:
                continue
            self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)

    def test_bug_834676(self):
        # Check for bug 834676: normalizing a lone surrogate pair-range
        # sequence must not crash.
        normalize('NFC', '\ud55c\uae00')
def test_main():
    # Entry point used by Python's regression-test driver.
    run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
wenlinyao/RegularTemporalEventPairs | model/generate_new_seed_test.py | 1 | 4331 | from nltk.stem.snowball import SnowballStemmer
from util_class import *
#from nltk.stem.wordnet import WordNetLemmatizer
def transform_format(line):
    """Rewrite a classifier output line: the leading token replaces the
    relation marker ('<==', '==>' or '<==>') found later in the line, and
    the leading token itself is dropped.

    If several markers appear, the last one wins.  A line with no marker
    after position 0 raises NameError (unchanged historical behavior).
    """
    tokens = line.split()
    for pos, tok in enumerate(tokens):
        if tok in ("<==", "==>", "<==>") and pos != 0:
            marker_pos = pos
    tokens[marker_pos] = tokens[0]
    return " ".join(tokens[1:])
def generate_new_seed_test_main(iteration_i, type1_threshold, type2_threshold):
    """Bootstrap one iteration of seed expansion: promote classifier outputs
    above the given count thresholds into the new seed-pair file, and keep
    the remaining (unpromoted, non-duplicate) pairs as the next test set.

    Args:
        iteration_i: current iteration number as a string (used in paths).
        type1_threshold: minimum count for any pair to be considered.
        type2_threshold: higher minimum count required for '<==>' pairs.
    """
    #if __name__ == "__main__":
    #    iteration_i = "1"
    stemmer = SnowballStemmer("english")
    #lemma = WordNetLemmatizer()
    f = open("error_analysis/" + iteration_i + "/1_Classifier_combine_classifiers", 'r')
    previous_seed_list = open("event_pair_train_test/seed_pairs_" + str(int(iteration_i) - 1), 'r')
    new_seed_pairs = []
    # exist_pairs keys are full "event1 event2" strings (both orders).
    exist_pairs = {}
    # only consider event key to do the filtering
    exist_pairs_event_only = {}
    # First pass: load last iteration's seeds and index them for dedup.
    for line in previous_seed_list:
        if not line.strip():
            continue
        new_seed_pairs.append(line)
        eventpair = EventPair(line)
        exist_pairs[eventpair.event1 + ' ' + eventpair.event2] = 1
        exist_pairs[eventpair.event2 + ' ' + eventpair.event1] = 1
        # Stemmed head-word keys, with the bracket markers stripped.
        event1_key = stemmer.stem(eventpair.event1_key.replace('[','').replace(']', ''))
        event2_key = stemmer.stem(eventpair.event2_key.replace('[','').replace(']', ''))
        # Canonicalize the relation so all keys use the '<==' direction.
        if eventpair.relation == '<==':
            instance = event1_key + ' <== ' + event2_key
            r_instance = event2_key + ' <== ' + event1_key
        elif eventpair.relation == '==>':
            instance = event2_key + ' <== ' + event1_key
            r_instance = event1_key + ' <== ' + event2_key
        #if r_instance in exist_pairs_event_only:
        #    print line
        #    print r_instance
        #    raw_input('continue?')
        # Only index a key pair if its reverse is not already present.
        if r_instance not in exist_pairs_event_only:
            exist_pairs_event_only[instance] = 1
    previous_seed_list.close()

    # Second pass: promote classifier outputs that clear the thresholds and
    # are not duplicates (by full pair or by reversed key pair).
    for line in f:
        if not line.strip():
            continue
        words = line.split()
        # words[-1] is the occurrence count emitted by the classifier.
        if int (words[-1]) < type1_threshold:
            continue
        # '<==>' (bidirectional) pairs need the higher threshold.
        if int (words[-1]) < type2_threshold and int(words[-1]) >= type1_threshold and '<==>' in line:
            continue
        transformed = transform_format(line)
        eventpair = EventPair(transformed)
        event1_key = stemmer.stem(eventpair.event1_key.replace('[','').replace(']', ''))
        event2_key = stemmer.stem(eventpair.event2_key.replace('[','').replace(']', ''))
        if eventpair.event1 + ' ' + eventpair.event2 in exist_pairs or eventpair.event2 + ' ' + eventpair.event1 in exist_pairs:
            continue
        if eventpair.relation == '<==':
            instance = event1_key + ' <== ' + event2_key
            r_instance = event2_key + ' <== ' + event1_key
        elif eventpair.relation == '==>':
            instance = event2_key + ' <== ' + event1_key
            r_instance = event1_key + ' <== ' + event2_key
        if r_instance in exist_pairs_event_only:
            #print line
            #print r_instance
            #raw_input('continue?')
            continue
        else:
            exist_pairs_event_only[instance] = 1
        exist_pairs[eventpair.event1 + ' ' + eventpair.event2] = 1
        exist_pairs[eventpair.event2 + ' ' + eventpair.event1] = 1
        new_seed_pairs.append(transformed + '\n')
    f.close()

    # Write the merged (old + newly promoted) seed list for this iteration.
    # NOTE(review): new_seed_pairs_output is never closed explicitly — it is
    # flushed on interpreter exit; confirm this is acceptable.
    new_seed_pairs_output = open("event_pair_train_test/seed_pairs_" + str(iteration_i), 'w')
    for line in new_seed_pairs:
        if not line.strip():
            continue
        new_seed_pairs_output.write(line)

    # Carry over last iteration's test pairs, minus anything now a seed.
    f = open("event_pair_train_test/test_pairs_" + str(int(iteration_i) - 1), 'r')
    new_other_output = open("event_pair_train_test/test_pairs_" + iteration_i, 'w')
    #print exist_pairs
    for line in f:
        if not line.strip():
            continue
        words = line.split()
        eventpair = EventPair(line)
        #print eventpair.event1 + ' ' + eventpair.event2
        if eventpair.event1 + ' ' + eventpair.event2 in exist_pairs or eventpair.event2 + ' ' + eventpair.event1 in exist_pairs:
            continue
        new_other_output.write(line)
    new_other_output.close()
    print "over"
CyanogenMod/android_kernel_samsung_msm8930-common | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# SchedGui draws everything with wxPython; fail early with an actionable
# message if it is not installed.
try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window showing scheduler traces as horizontal rectangles on
    a zoomable, scrollable timeline.  Time is mapped to x pixels, one row of
    rectangles per traced entity; the sched_tracer object supplies the data
    and receives mouse-selection callbacks."""

    # Layout constants (pixels).
    Y_OFFSET = 100              # top margin above the first row
    RECT_HEIGHT = 100           # height of one trace row
    RECT_SPACE = 50             # vertical gap between rows
    EVENT_MARKING_WIDTH = 5     # height of the event-marker strip atop a rect

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5                 # pixels per microsecond scale factor
        self.scroll_scale = 20          # scrollbar units per pixel
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None     # summary StaticText, created lazily

        self.Show(True)

    def us_to_px(self, val):
        # Convert microseconds to pixels at the current zoom level.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current scroll origin (x) expressed in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one rectangle on row `nr` spanning [start, end] (us).
        When `top_color` is given, a thin marker strip of that color is
        drawn along the rectangle's top edge."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to redraw everything in the visible [start, end]
        # window (offsets are relative to ts_start).
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to its row index, or -1 if it falls outside every
        # row (above the top margin, past the last row, or in a gap).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        # Replace the summary text widget below the timeline.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward a click to the tracer as (row, timestamp in us).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Recompute the virtual canvas width from the trace interval.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-apply scrollbars after a zoom change, keeping timestamp `x`
        # at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll one unit in each direction.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
jholewinski/ics-12-overlapped-tiling | scripts/yaml/representer.py | 359 | 17642 |
# Public API of this module.
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
    'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
    """Raised when a Python object cannot be represented as a YAML node."""
    pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
    def ignore_aliases(self, data):
        """Return True if *data* should never be anchored/aliased.

        Base implementation: alias everything (implicitly returns a falsy
        None).  Subclasses override this for immutable values.
        """
        return False
class SafeRepresenter(BaseRepresenter):
    """Representer for the 'safe' subset: only plain Python/YAML types.

    Python 2 module: `unicode`, `long` and the `'base64'` codec are used.
    """
    def ignore_aliases(self, data):
        # Immutable singletons and simple scalars never need anchors/aliases.
        # Implicitly returns None (falsy) for everything else.
        if data in [None, ()]:
            return True
        if isinstance(data, (str, unicode, bool, int, float)):
            return True
    def represent_none(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:null',
                u'null')
    def represent_str(self, data):
        # Byte strings: try ASCII, then UTF-8; anything else is emitted as
        # base64-encoded !!binary using literal block style.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
    def represent_bool(self, data):
        if data:
            value = u'true'
        else:
            value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
    def represent_int(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    def represent_long(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    # Compute a platform-independent float infinity by repeated squaring;
    # runs once at class-definition time.
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value
    def represent_float(self, data):
        # data != data detects NaN; the second clause catches exotic
        # platforms where NaN compares equal to everything.
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = unicode(repr(data)).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts. For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag. We fix this by adding
            # '.0' before the 'e' symbol.
            if u'.' not in value and u'e' in value:
                value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
    def represent_list(self, data):
        # A disabled experiment: representing lists of 2-tuples as !!pairs.
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
            return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
    def represent_dict(self, data):
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
    def represent_set(self, data):
        # YAML !!set is a mapping whose values are all null.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
    def represent_date(self, data):
        value = unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_datetime(self, data):
        value = unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # Used by YAMLObject subclasses: serialize __getstate__() if
        # available, otherwise a copy of the instance __dict__.
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)
    def represent_undefined(self, data):
        # Fallback representer: the safe representer refuses unknown types.
        raise RepresenterError("cannot represent an object: %s" % data)
# Register the safe representers.  The final None entry is the catch-all
# fallback used when no specific representer matches.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
        SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
        SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Full (unsafe) representer: adds python/* tags for arbitrary objects.

    Python 2 module: relies on `unicode`, `long`, classic classes and the
    `copy_reg` dispatch table.
    """
    def represent_str(self, data):
        # Like SafeRepresenter.represent_str, but non-ASCII UTF-8 byte
        # strings get the python/str tag so they round-trip as `str`.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        # ASCII-only unicode gets python/unicode so it loads back as
        # `unicode` (a plain scalar would load as `str`).
        tag = None
        try:
            data.encode('ascii')
            tag = u'tag:yaml.org,2002:python/unicode'
        except UnicodeEncodeError:
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data)
    def represent_long(self, data):
        # Only tag as python/long when the value does not fit in an int.
        tag = u'tag:yaml.org,2002:int'
        if int(data) is not data:
            tag = u'tag:yaml.org,2002:python/long'
        return self.represent_scalar(tag, unicode(data))
    def represent_complex(self, data):
        # Emit the shortest unambiguous textual form of the complex number.
        if data.imag == 0.0:
            data = u'%r' % data.real
        elif data.real == 0.0:
            data = u'%rj' % data.imag
        elif data.imag > 0:
            data = u'%r+%rj' % (data.real, data.imag)
        else:
            data = u'%r%rj' % (data.real, data.imag)
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
    def represent_tuple(self, data):
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
    def represent_name(self, data):
        # Classes and functions are serialized by their qualified name only.
        name = u'%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
    def represent_module(self, data):
        return self.represent_scalar(
                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
    def represent_instance(self, data):
        # For instances of classic classes, we use __getinitargs__ and
        # __getstate__ to serialize the data.
        # If data.__getinitargs__ exists, the object must be reconstructed by
        # calling cls(**args), where args is a tuple returned by
        # __getinitargs__. Otherwise, the cls.__init__ method should never be
        # called and the class instance is created by instantiating a trivial
        # class and assigning to the instance's __class__ variable.
        # If data.__getstate__ exists, it returns the state of the object.
        # Otherwise, the state of the object is data.__dict__.
        # We produce either a !!python/object or !!python/object/new node.
        # If data.__getinitargs__ does not exist and state is a dictionary, we
        # produce a !!python/object node . Otherwise we produce a
        # !!python/object/new node.
        cls = data.__class__
        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
        args = None
        state = None
        if hasattr(data, '__getinitargs__'):
            args = list(data.__getinitargs__())
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__
        if args is None and isinstance(state, dict):
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+class_name, state)
        if isinstance(state, dict) and not state:
            return self.represent_sequence(
                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
        value = {}
        if args:
            value['args'] = args
        value['state'] = state
        return self.represent_mapping(
                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copy_reg.dispatch_table:
            reduce = copy_reg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent object: %r" % data)
        # Pad the reduce tuple to exactly five elements with None.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            # Protocol-2 __newobj__: real constructor is args[0].__new__.
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = u'%s.%s' % (function.__module__, function.__name__)
        # Prefer the compact !!python/object form when only dict state exists.
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems  \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)
# Register the unsafe representers; the multi-representers at the end catch
# classic instances and any new-style object via the __reduce__ protocol.
Representer.add_representer(str,
        Representer.represent_str)
Representer.add_representer(unicode,
        Representer.represent_unicode)
Representer.add_representer(long,
        Representer.represent_long)
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_representer(type,
        Representer.represent_name)
Representer.add_representer(types.ClassType,
        Representer.represent_name)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
        Representer.represent_instance)
Representer.add_multi_representer(object,
        Representer.represent_object)
| mit |
wetneb/django | tests/template_tests/filter_tests/test_dictsortreversed.py | 342 | 1066 | from django.template.defaultfilters import dictsortreversed
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Unit tests for the ``dictsortreversed`` template filter."""

    def test_sort(self):
        """Dicts are ordered by the given key, descending."""
        sorted_dicts = dictsortreversed(
            [{'age': 23, 'name': 'Barbara-Ann'},
             {'age': 63, 'name': 'Ra Ra Rasputin'},
             {'name': 'Jonny B Goode', 'age': 18}],
            'age',
        )
        # Fix: the loop variable was named ``dict``, shadowing the builtin.
        self.assertEqual(
            [sorted(d.items()) for d in sorted_dicts],
            [[('age', 63), ('name', 'Ra Ra Rasputin')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 18), ('name', 'Jonny B Goode')]],
        )

    def test_invalid_values(self):
        """
        If dictsortreversed is passed something other than a list of
        dictionaries, fail silently.
        """
        self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')
        self.assertEqual(dictsortreversed('Hello!', 'age'), '')
        self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')
        self.assertEqual(dictsortreversed(1, 'age'), '')
| bsd-3-clause |
openstack/horizon | openstack_dashboard/dashboards/identity/identity_providers/protocols/views.py | 1 | 1697 | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from openstack_dashboard.dashboards.identity.identity_providers.protocols \
import forms as protocol_forms
class AddProtocolView(forms.ModalFormView):
    """Modal form view for creating a federation protocol on an identity
    provider.  The identity provider id arrives via the URL kwargs."""
    template_name = 'identity/identity_providers/protocols/create.html'
    form_id = "create_protocol_form"
    form_class = protocol_forms.AddProtocolForm
    submit_label = _("Create Protocol")
    success_url = "horizon:identity:identity_providers:protocols_tab"
    page_title = _("Create Protocol")

    def get_success_url(self):
        # Redirect back to the protocols tab of the owning identity provider.
        idp_id = self.kwargs['identity_provider_id']
        return reverse(self.success_url, args=(idp_id,))

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        idp_id = self.kwargs['identity_provider_id']
        # The modal form posts back to this provider-specific create URL.
        context["submit_url"] = reverse(
            "horizon:identity:identity_providers:protocols:create",
            args=(idp_id,))
        return context

    def get_initial(self):
        # Pre-populate the form with the owning identity provider's id.
        return {"idp_id": self.kwargs['identity_provider_id']}
| apache-2.0 |
thaumos/ansible | lib/ansible/modules/cloud/vmware/vmware_host_acceptance.py | 48 | 6958 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_acceptance
short_description: Manage the host acceptance level of an ESXi host
description:
- This module can be used to manage the host acceptance level of an ESXi host.
- The host acceptance level controls the acceptance level of each VIB on a ESXi host.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Acceptance level of all ESXi host system in the given cluster will be managed.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Acceptance level of this ESXi host system will be managed.
- If C(cluster_name) is not given, this parameter is required.
state:
description:
- Set or list acceptance level of the given ESXi host.
- 'If set to C(list), then will return current acceptance level of given host system/s.'
- If set to C(present), then will set given acceptance level.
choices: [ list, present ]
required: False
default: 'list'
acceptance_level:
description:
- Name of acceptance level.
- If set to C(partner), then accept only partner and VMware signed and certified VIBs.
- If set to C(vmware_certified), then accept only VIBs that are signed and certified by VMware.
- If set to C(vmware_accepted), then accept VIBs that have been accepted by VMware.
- If set to C(community), then accept all VIBs, even those that are not signed.
choices: [ community, partner, vmware_accepted, vmware_certified ]
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set acceptance level to community for all ESXi Host in given Cluster
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
acceptance_level: 'community'
state: present
delegate_to: localhost
register: cluster_acceptance_level
- name: Set acceptance level to vmware_accepted for the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
acceptance_level: 'vmware_accepted'
state: present
delegate_to: localhost
register: host_acceptance_level
- name: Get acceptance level from the given ESXi Host
vmware_host_acceptance:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
state: list
delegate_to: localhost
register: host_acceptance_level
'''
RETURN = r'''
facts:
description:
- dict with hostname as key and dict with acceptance level facts, error as value
returned: facts
type: dict
sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VMwareAccpetanceManager(PyVmomi):
    """Gathers and updates the VIB acceptance level of one or more ESXi hosts.

    NOTE(review): the class-name typo ("Accpetance") is kept deliberately --
    renaming it would break any external reference to the class.
    """
    def __init__(self, module):
        super(VMwareAccpetanceManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        esxi_name = self.params.get('esxi_hostname', None)
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=esxi_name)
        self.desired_state = self.params.get('state')
        self.hosts_facts = {}
        self.acceptance_level = self.params.get('acceptance_level')
    def gather_acceptance_facts(self):
        """Fill self.hosts_facts with each host's current acceptance level."""
        for host in self.hosts:
            facts = dict(level='', error='NA')
            self.hosts_facts[host.name] = facts
            img_mgr = host.configManager.imageConfigManager
            if not img_mgr:
                continue
            try:
                facts['level'] = img_mgr.HostImageConfigGetAcceptance()
            except vim.fault.HostConfigFault as e:
                facts['error'] = to_native(e.msg)
    def set_acceptance_level(self):
        """Apply the desired acceptance level where needed and exit the module."""
        changes = []
        for host in self.hosts:
            facts = self.hosts_facts[host.name]
            changed = False
            if facts['level'] != self.acceptance_level:
                img_mgr = host.configManager.imageConfigManager
                if img_mgr:
                    try:
                        if self.module.check_mode:
                            # Dry run: report the level we would have set.
                            facts['level'] = self.acceptance_level
                        else:
                            img_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level)
                            facts['level'] = img_mgr.HostImageConfigGetAcceptance()
                        changed = True
                    except vim.fault.HostConfigFault as e:
                        facts['error'] = to_native(e.msg)
            changes.append(changed)
        self.module.exit_json(changed=any(changes), facts=self.hosts_facts)
    def check_acceptance_state(self):
        """Dispatch on the requested state: 'list' reports, 'present' applies."""
        self.gather_acceptance_facts()
        if self.desired_state == 'list':
            self.module.exit_json(changed=False, facts=self.hosts_facts)
        self.set_acceptance_level()
def main():
    """Module entry point: validate arguments and run the acceptance manager."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        cluster_name=dict(type='str', required=False),
        esxi_hostname=dict(type='str', required=False),
        acceptance_level=dict(
            type='str',
            choices=['community', 'partner', 'vmware_accepted', 'vmware_certified'],
        ),
        state=dict(type='str', choices=['list', 'present'], default='list'),
    ))
    # A target (cluster or host) is mandatory; an acceptance level is only
    # required when actually setting state=present.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[['cluster_name', 'esxi_hostname']],
        required_if=[['state', 'present', ['acceptance_level']]],
        supports_check_mode=True,
    )
    VMwareAccpetanceManager(module).check_acceptance_state()
if __name__ == "__main__":
    main()
| gpl-3.0 |
jcrist/pydy | examples/Kane1985/Chapter4/Ex8.18.py | 7 | 1162 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 8.18 from Kane 1985."""
from __future__ import division
from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame
from sympy.physics.mechanics import cross, dot, dynamicsymbols, inertia
from util import msprint
# Part a: generalized inertia force contribution T* for a body spinning
# about a single fixed axis N.x with a full (non-principal) inertia dyadic.
print("\n part a")
Ia, Ib, Ic, Iab, Ibc, Ica, t = symbols('Ia Ib Ic Iab Ibc Ica t')
omega = dynamicsymbols('omega')
N = ReferenceFrame('N')
# I = (I11 * N.x + I12 * N.y + I13 * N.z) N.x +
# (I21 * N.x + I22 * N.y + I23 * N.z) N.y +
# (I31 * N.x + I32 * N.y + I33 * N.z) N.z
# definition of T* is:
# T* = -dot(alpha, I) - dot(cross(omega, I), omega)
ang_vel = omega * N.x
I = inertia(N, Ia, Ib, Ic, Iab, Ibc, Ica)
# alpha is the time derivative of the angular velocity taken in frame N.
T_star = -dot(ang_vel.diff(t, N), I) - dot(cross(ang_vel, I), ang_vel)
print(msprint(T_star))
# Part b: same quantity for a general angular velocity expressed in a
# body-fixed frame B with distinct inertia scalars.
print("\n part b")
I11, I22, I33, I12, I23, I31 = symbols('I11 I22 I33 I12 I23 I31')
omega1, omega2, omega3 = dynamicsymbols('omega1:4')
B = ReferenceFrame('B')
ang_vel = omega1 * B.x + omega2 * B.y + omega3 * B.z
I = inertia(B, I11, I22, I33, I12, I23, I31)
T_star = -dot(ang_vel.diff(t, B), I) - dot(cross(ang_vel, I), ang_vel)
print(msprint(T_star))
| bsd-3-clause |
JTCunning/sentry | tests/sentry/quotas/redis/tests.py | 13 | 2628 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture, patcher
from sentry.quotas.redis import RedisQuota
from sentry.testutils import TestCase
class RedisQuotaTest(TestCase):
    """Exercises RedisQuota rate limiting with the three quota sources
    (system, team, project) mocked out via exam patchers."""

    @fixture
    def quota(self):
        # Quota instance pointed at a dedicated test database.
        inst = RedisQuota(hosts={
            0: {'db': 9}
        })
        return inst

    @patcher.object(RedisQuota, 'get_system_quota')
    def get_system_quota(self):
        inst = mock.MagicMock()
        inst.return_value = 0
        return inst

    @patcher.object(RedisQuota, 'get_team_quota')
    def get_team_quota(self):
        inst = mock.MagicMock()
        inst.return_value = 0
        return inst

    @patcher.object(RedisQuota, 'get_project_quota')
    def get_project_quota(self):
        inst = mock.MagicMock()
        inst.return_value = 0
        return inst

    @patcher.object(RedisQuota, '_incr_project')
    def _incr_project(self):
        # Returns the (system, team, project) counters after increment.
        inst = mock.MagicMock()
        inst.return_value = (0, 0, 0)
        return inst

    def test_default_host_is_local(self):
        quota = RedisQuota()
        # Fix: use assertEqual -- assertEquals is a deprecated alias
        # (removed in Python 3.12).
        self.assertEqual(len(quota.conn.hosts), 1)
        self.assertEqual(quota.conn.hosts[0].host, 'localhost')

    def test_bails_immediately_without_any_quota(self):
        self._incr_project.return_value = (0, 0, 0)
        result = self.quota.is_rate_limited(self.project)
        assert not self._incr_project.called
        assert not result.is_limited

    def test_enforces_project_quota(self):
        self.get_project_quota.return_value = 100
        self._incr_project.return_value = (0, 0, 101)
        result = self.quota.is_rate_limited(self.project)
        assert result.is_limited
        self._incr_project.return_value = (0, 0, 99)
        result = self.quota.is_rate_limited(self.project)
        assert not result.is_limited

    def test_enforces_team_quota(self):
        self.get_team_quota.return_value = 100
        self._incr_project.return_value = (0, 101, 0)
        result = self.quota.is_rate_limited(self.project)
        assert result.is_limited
        self._incr_project.return_value = (0, 99, 0)
        result = self.quota.is_rate_limited(self.project)
        assert not result.is_limited

    def test_enforces_system_quota(self):
        self.get_system_quota.return_value = 100
        self._incr_project.return_value = (101, 0, 0)
        result = self.quota.is_rate_limited(self.project)
        assert result.is_limited
        self._incr_project.return_value = (99, 0, 0)
        result = self.quota.is_rate_limited(self.project)
        assert not result.is_limited
| bsd-3-clause |
dwaynebailey/virtaal | virtaal/plugins/tm/models/google_translate.py | 4 | 6927 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
# Copyright 2014 F Wolff
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import logging
import urllib
import pycurl
# These two json modules are API compatible
try:
import simplejson as json #should be a bit faster; needed for Python < 2.6
except ImportError:
import json #available since Python 2.6
from basetmmodel import BaseTMModel, unescape_html_entities
from virtaal.support.httpclient import HTTPClient, RESTRequest
# Maps Virtaal/ISO language codes to the codes Google's API expects.
# Some codes are weird or can be reused for others
code_translation = {
    'fl': 'tl', # Filipino -> Tagalog
    'he': 'iw', # Weird code Google uses for Hebrew
    'nb': 'no', # Google maps no (Norwegian) to its Norwegian (Bokmål) (nb) translator
}
# Google's Terms of Service require a proper HTTP referrer on API calls.
virtaal_referrer = "http://virtaal.org/"
class TMModel(BaseTMModel):
"""This is a Google Translate translation memory model.
The plugin uses the U{Google AJAX Languages API<http://code.google.com/apis/ajaxlanguage/>}
to query Google's machine translation services. The implementation makes use of the
U{RESTful<http://code.google.com/apis/ajaxlanguage/documentation/#fonje>} interface for
Non-JavaScript environments.
"""
__gtype_name__ = 'GoogleTranslateTMModel'
#l10n: The name of Google Translate in your language (translated in most languages). See http://translate.google.com/
display_name = _('Google Translate')
description = _("Unreviewed machine translations from Google's translation service")
default_config = {'api_key': ''}
translate_url = "https://www.googleapis.com/language/translate/v2?key=%(key)s&q=%(message)s&source=%(from)s&target=%(to)s"
languages_url = "https://www.googleapis.com/language/translate/v2/languages?key=%(key)s"
# INITIALIZERS #
def __init__(self, internal_name, controller):
self.internal_name = internal_name
super(TMModel, self).__init__(controller)
self.load_config()
if not self.config['api_key']:
self._disable_all("An API key is needed to use the Google Translate plugin")
return
self.client = HTTPClient()
self._languages = set()
langreq = RESTRequest(self.url_getlanguages % self.config, '')
self.client.add(langreq)
langreq.connect(
'http-success',
lambda langreq, response: self.got_languages(response)
)
# METHODS #
def query(self, tmcontroller, unit):
query_str = unit.source
# Google's Terms of Service says the whole URL must be less than "2K"
# characters.
query_str = query_str[:2000 - len(self.translate_url)]
source_lang = code_translation.get(self.source_lang, self.source_lang).replace('_', '-')
target_lang = code_translation.get(self.target_lang, self.target_lang).replace('_', '-')
if source_lang not in self._languages or target_lang not in self._languages:
logging.debug('language pair not supported: %s => %s' % (source_lang, target_lang))
return
if self.cache.has_key(query_str):
self.emit('match-found', query_str, self.cache[query_str])
else:
real_url = self.translate_url % {
'key': self.config['api_key'],
'message': urllib.quote_plus(query_str.encode('utf-8')),
'from': source_lang,
'to': target_lang,
}
req = RESTRequest(real_url, '')
self.client.add(req)
# Google's Terms of Service says we need a proper HTTP referrer
req.curl.setopt(pycurl.REFERER, virtaal_referrer)
req.connect(
'http-success',
lambda req, response: self.got_translation(response, query_str)
)
req.connect(
'http-client-error',
lambda req, response: self.got_error(response, query_str)
)
req.connect(
'http-server-error',
lambda req, response: self.got_error(response, query_str)
)
def got_translation(self, val, query_str):
"""Handle the response from the web service now that it came in."""
# In December 2011 version 1 of the API was deprecated, and we had to
# release code to handle the eminent disappearance of the API. Although
# version 2 is now supported, the code is a bit more careful (as most
# code probably should be) and in case of error we make the list of
# supported languages empty so that no unnecesary network activity is
# performed if we can't communicate with the available API any more.
try:
data = json.loads(val)
# We try to access the members to validate that the dictionary is
# formed in the way we expect.
data['data']
data['data']['translations']
text = data['data']['translations'][0]['translatedText']
except Exception, e:
self._disable_all("Error with json response: %s" % e)
return
target_unescaped = unescape_html_entities(text)
if not isinstance(target_unescaped, unicode):
target_unescaped = unicode(target_unescaped, 'utf-8')
match = {
'source': query_str,
'target': target_unescaped,
#l10n: Try to keep this as short as possible. Feel free to transliterate.
'tmsource': _('Google')
}
self.cache[query_str] = [match]
self.emit('match-found', query_str, [match])
def got_languages(self, val):
"""Handle the response from the web service to set up language pairs."""
try:
data = json.loads(val)
data['data']
languages = data['data']['languages']
except Exception, e:
self._disable_all("Error with json response: %s" % e)
return
self._languages = set([l['language'] for l in languages])
def got_error(self, val, query_str):
self._disable_all("Got an error response: %s" % val)
def _disable_all(self, reason):
self._languages = set()
logging.debug("Stopping all queries for Google Translate. %s" % reason)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.