| repo_name | ref | path | copies | content |
|---|---|---|---|---|
rg3/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/aenetworks.py
|
4
|
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE
from ..utils import (
smuggle_url,
update_url_query,
unescapeHTML,
extract_attributes,
get_element_by_attribute,
)
from ..compat import (
compat_urlparse,
)
class AENetworksBaseIE(ThePlatformIE):
_THEPLATFORM_KEY = 'crazyjava'
_THEPLATFORM_SECRET = 's3cr3t'
class AENetworksIE(AENetworksBaseIE):
IE_NAME = 'aenetworks'
IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?P<domain>
(?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
fyi\.tv
)/
(?:
shows/(?P<show_path>[^/]+(?:/[^/]+){0,2})|
movies/(?P<movie_display_id>[^/]+)(?:/full-movie)?|
specials/(?P<special_display_id>[^/]+)/full-special|
collections/[^/]+/(?P<collection_display_id>[^/]+)
)
'''
_TESTS = [{
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
'md5': 'a97a65f7e823ae10e9244bc5433d5fe6',
'info_dict': {
'id': '22253814',
'ext': 'mp4',
'title': 'Winter Is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
'timestamp': 1338306241,
'upload_date': '20120529',
'uploader': 'AENE-NEW',
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.history.com/shows/ancient-aliens/season-1',
'info_dict': {
'id': '71889446852',
},
'playlist_mincount': 5,
}, {
'url': 'http://www.mylifetime.com/shows/atlanta-plastic',
'info_dict': {
'id': 'SERIES4317',
'title': 'Atlanta Plastic',
},
'playlist_mincount': 2,
}, {
'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
'only_matching': True
}, {
'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
'only_matching': True
}, {
'url': 'https://www.lifetimemovieclub.com/movies/a-killer-among-us',
'only_matching': True
}, {
'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
'only_matching': True
}, {
'url': 'https://www.historyvault.com/collections/america-the-story-of-us/westward',
'only_matching': True
}]
_DOMAIN_TO_REQUESTOR_ID = {
'history.com': 'HISTORY',
'aetv.com': 'AETV',
'mylifetime.com': 'LIFETIME',
'lifetimemovieclub.com': 'LIFETIMEMOVIECLUB',
'fyi.tv': 'FYI',
}
def _real_extract(self, url):
domain, show_path, movie_display_id, special_display_id, collection_display_id = re.match(self._VALID_URL, url).groups()
display_id = show_path or movie_display_id or special_display_id or collection_display_id
webpage = self._download_webpage(url, display_id, headers=self.geo_verification_headers())
if show_path:
url_parts = show_path.split('/')
url_parts_len = len(url_parts)
if url_parts_len == 1:
entries = []
for season_url_path in re.findall(r'(?s)<li[^>]+data-href="(/shows/%s/season-\d+)"' % url_parts[0], webpage):
entries.append(self.url_result(
compat_urlparse.urljoin(url, season_url_path), 'AENetworks'))
if entries:
return self.playlist_result(
entries, self._html_search_meta('aetn:SeriesId', webpage),
self._html_search_meta('aetn:SeriesTitle', webpage))
else:
# single season
url_parts_len = 2
if url_parts_len == 2:
entries = []
for episode_item in re.findall(r'(?s)<[^>]+class="[^"]*(?:episode|program)-item[^"]*"[^>]*>', webpage):
episode_attributes = extract_attributes(episode_item)
episode_url = compat_urlparse.urljoin(
url, episode_attributes['data-canonical'])
entries.append(self.url_result(
episode_url, 'AENetworks',
episode_attributes.get('data-videoid') or episode_attributes.get('data-video-id')))
return self.playlist_result(
entries, self._html_search_meta('aetn:SeasonId', webpage))
query = {
'mbr': 'true',
'assetTypes': 'high_video_ak',
'switch': 'hls_high_ak',
}
video_id = self._html_search_meta('aetn:VideoID', webpage)
media_url = self._search_regex(
[r"media_url\s*=\s*'(?P<url>[^']+)'",
r'data-media-url=(?P<url>(?:https?:)?//[^\s>]+)',
r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
webpage, 'video url', group='url')
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
info = self._parse_theplatform_metadata(theplatform_metadata)
if theplatform_metadata.get('AETN$isBehindWall'):
requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
resource = self._get_mvpd_resource(
requestor_id, theplatform_metadata['title'],
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
theplatform_metadata['ratings'][0]['rating'])
query['auth'] = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
info.update(self._search_json_ld(webpage, video_id, fatal=False))
media_url = update_url_query(media_url, query)
media_url = self._sign_url(media_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
formats, subtitles = self._extract_theplatform_smil(media_url, video_id)
self._sort_formats(formats)
info.update({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
})
return info
class HistoryTopicIE(AENetworksBaseIE):
IE_NAME = 'history:topic'
IE_DESC = 'History.com Topic'
_VALID_URL = r'https?://(?:www\.)?history\.com/topics/(?:[^/]+/)?(?P<topic_id>[^/]+)(?:/[^/]+(?:/(?P<video_display_id>[^/?#]+))?)?'
_TESTS = [{
'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
'info_dict': {
'id': '40700995724',
'ext': 'mp4',
'title': "Bet You Didn't Know: Valentine's Day",
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
'timestamp': 1375819729,
'upload_date': '20130806',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/videos',
'info_dict': {
'id': 'world-war-i-history',
'title': 'World War I History',
},
'playlist_mincount': 23,
}, {
'url': 'http://www.history.com/topics/world-war-i-history/videos',
'only_matching': True,
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history',
'only_matching': True,
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/speeches',
'only_matching': True,
}]
def theplatform_url_result(self, theplatform_url, video_id, query):
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(
update_url_query(theplatform_url, query),
{
'sig': {
'key': self._THEPLATFORM_KEY,
'secret': self._THEPLATFORM_SECRET,
},
'force_smil_url': True
}),
'ie_key': 'ThePlatform',
}
def _real_extract(self, url):
topic_id, video_display_id = re.match(self._VALID_URL, url).groups()
if video_display_id:
webpage = self._download_webpage(url, video_display_id)
release_url, video_id = re.search(r"_videoPlayer.play\('([^']+)'\s*,\s*'[^']+'\s*,\s*'(\d+)'\)", webpage).groups()
release_url = unescapeHTML(release_url)
return self.theplatform_url_result(
release_url, video_id, {
'mbr': 'true',
'switch': 'hls',
'assetTypes': 'high_video_ak',
})
else:
webpage = self._download_webpage(url, topic_id)
entries = []
for episode_item in re.findall(r'<a.+?data-release-url="[^"]+"[^>]*>', webpage):
video_attributes = extract_attributes(episode_item)
entries.append(self.theplatform_url_result(
video_attributes['data-release-url'], video_attributes['data-id'], {
'mbr': 'true',
'switch': 'hls',
'assetTypes': 'high_video_ak',
}))
return self.playlist_result(entries, topic_id, get_element_by_attribute('class', 'show-title', webpage))
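# A minimal usage sketch (not part of the original file), assuming youtube-dl
# is installed: extractors such as AENetworksIE are driven through the
# YoutubeDL front end rather than instantiated directly.
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://www.history.com/shows/mountain-men/season-1/episode-1',
#             download=False)
#         print(info.get('title'))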
|
tersmitten/ansible
|
refs/heads/devel
|
test/units/modules/network/f5/test_bigip_firewall_port_list.py
|
38
|
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_firewall_port_list import ApiParameters
from library.modules.bigip_firewall_port_list import ModuleParameters
from library.modules.bigip_firewall_port_list import ModuleManager
from library.modules.bigip_firewall_port_list import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_firewall_port_list import ApiParameters
from ansible.modules.network.f5.bigip_firewall_port_list import ModuleParameters
from ansible.modules.network.f5.bigip_firewall_port_list import ModuleManager
from ansible.modules.network.f5.bigip_firewall_port_list import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
description='this is a description',
ports=[1, 2, 3, 4],
port_ranges=['10-20', '30-40', '50-60'],
port_lists=['/Common/foo', 'foo']
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.description == 'this is a description'
assert len(p.ports) == 4
assert len(p.port_ranges) == 3
assert len(p.port_lists) == 2
def test_api_parameters(self):
args = load_fixture('load_security_port_list_1.json')
p = ApiParameters(params=args)
assert len(p.ports) == 4
assert len(p.port_ranges) == 3
assert len(p.port_lists) == 1
assert sorted(p.ports) == [1, 2, 3, 4]
assert sorted(p.port_ranges) == ['10-20', '30-40', '50-60']
assert p.port_lists[0] == '/Common/_sys_self_allow_tcp_defaults'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
try:
self.p1 = patch('library.modules.bigip_firewall_port_list.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_firewall_port_list.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
def tearDown(self):
self.p1.stop()
def test_create(self, *args):
set_module_args(dict(
name='foo',
description='this is a description',
ports=[1, 2, 3, 4],
port_ranges=['10-20', '30-40', '50-60'],
port_lists=['/Common/foo', 'foo'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert 'ports' in results
assert 'port_lists' in results
assert 'port_ranges' in results
assert len(results['ports']) == 4
assert len(results['port_ranges']) == 3
assert len(results['port_lists']) == 2
assert results['description'] == 'this is a description'
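# A minimal sketch (not part of the original file) of how this module is
# typically run, assuming an Ansible source checkout with the unit-test
# requirements installed:
#
#     pytest test/units/modules/network/f5/test_bigip_firewall_port_list.py
#
# Note how Mock(side_effect=[False, True]) above makes exists() report the
# port list as absent on the first check and present after creation, which
# steers ModuleManager down its create path.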
|
aguadev/aguadev
|
refs/heads/master
|
apps/node/node-v0.10.15/deps/v8/tools/testrunner/server/work_handler.py
|
123
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import SocketServer
import stat
import subprocess
import threading
from . import compression
from . import constants
from . import signatures
from ..network import endpoint
from ..objects import workpacket
class WorkHandler(SocketServer.BaseRequestHandler):
def handle(self):
rec = compression.Receiver(self.request)
while not rec.IsDone():
data = rec.Current()
with self.server.job_lock:
self._WorkOnWorkPacket(data)
rec.Advance()
def _WorkOnWorkPacket(self, data):
server_root = self.server.daemon.root
v8_root = os.path.join(server_root, "v8")
os.chdir(v8_root)
packet = workpacket.WorkPacket.Unpack(data)
self.ctx = packet.context
self.ctx.shell_dir = os.path.join("out",
"%s.%s" % (self.ctx.arch, self.ctx.mode))
if not os.path.isdir(self.ctx.shell_dir):
os.makedirs(self.ctx.shell_dir)
for binary in packet.binaries:
if not self._UnpackBinary(binary, packet.pubkey_fingerprint):
return
if not self._CheckoutRevision(packet.base_revision):
return
if not self._ApplyPatch(packet.patch):
return
tests = packet.tests
endpoint.Execute(v8_root, self.ctx, tests, self.request, self.server.daemon)
self._SendResponse()
def _SendResponse(self, error_message=None):
try:
if error_message:
compression.Send([[-1, error_message]], self.request)
compression.Send(constants.END_OF_STREAM, self.request)
return
except Exception, e:
pass # Peer is gone. There's nothing we can do.
# Clean up.
self._Call("git checkout -f")
self._Call("git clean -f -d")
self._Call("rm -rf %s" % self.ctx.shell_dir)
def _UnpackBinary(self, binary, pubkey_fingerprint):
binary_name = binary["name"]
if binary_name == "libv8.so":
libdir = os.path.join(self.ctx.shell_dir, "lib.target")
if not os.path.exists(libdir): os.makedirs(libdir)
target = os.path.join(libdir, binary_name)
else:
target = os.path.join(self.ctx.shell_dir, binary_name)
pubkeyfile = "../trusted/%s.pem" % pubkey_fingerprint
if not signatures.VerifySignature(target, binary["blob"],
binary["sign"], pubkeyfile):
self._SendResponse("Signature verification failed")
return False
os.chmod(target, stat.S_IRWXU)
return True
def _CheckoutRevision(self, base_svn_revision):
get_hash_cmd = (
"git log -1 --format=%%H --remotes --grep='^git-svn-id:.*@%s'" %
base_svn_revision)
try:
base_revision = subprocess.check_output(get_hash_cmd, shell=True)
if not base_revision: raise ValueError
except:
self._Call("git fetch")
try:
base_revision = subprocess.check_output(get_hash_cmd, shell=True)
if not base_revision: raise ValueError
except:
self._SendResponse("Base revision not found.")
return False
code = self._Call("git checkout -f %s" % base_revision)
if code != 0:
self._SendResponse("Error trying to check out base revision.")
return False
code = self._Call("git clean -f -d")
if code != 0:
self._SendResponse("Failed to reset checkout")
return False
return True
def _ApplyPatch(self, patch):
if not patch: return True # Just skip if the patch is empty.
patchfilename = "_dtest_incoming_patch.patch"
with open(patchfilename, "w") as f:
f.write(patch)
code = self._Call("git apply %s" % patchfilename)
if code != 0:
self._SendResponse("Error applying patch.")
return False
return True
def _Call(self, cmd):
return subprocess.call(cmd, shell=True)
class WorkSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, daemon):
address = (daemon.ip, constants.PEER_PORT)
SocketServer.TCPServer.__init__(self, address, WorkHandler)
self.job_lock = threading.Lock()
self.daemon = daemon
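# A minimal sketch (not part of the original file): WorkSocketServer is a
# plain SocketServer.TCPServer, so a daemon object exposing `ip` (plus the
# `root` attribute used by WorkHandler) would be served like this:
#
#     server = WorkSocketServer(daemon)
#     threading.Thread(target=server.serve_forever).start()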
|
rapidhere/rpbtman_autosign
|
refs/heads/master
|
pytz/zoneinfo/America/Mendoza.py
|
9
|
'''tzinfo timezone information for America/Mendoza.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Mendoza(DstTzInfo):
'''America/Mendoza timezone definition. See datetime.tzinfo for details'''
zone = 'America/Mendoza'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1920,5,1,4,16,48),
d(1930,12,1,4,0,0),
d(1931,4,1,3,0,0),
d(1931,10,15,4,0,0),
d(1932,3,1,3,0,0),
d(1932,11,1,4,0,0),
d(1933,3,1,3,0,0),
d(1933,11,1,4,0,0),
d(1934,3,1,3,0,0),
d(1934,11,1,4,0,0),
d(1935,3,1,3,0,0),
d(1935,11,1,4,0,0),
d(1936,3,1,3,0,0),
d(1936,11,1,4,0,0),
d(1937,3,1,3,0,0),
d(1937,11,1,4,0,0),
d(1938,3,1,3,0,0),
d(1938,11,1,4,0,0),
d(1939,3,1,3,0,0),
d(1939,11,1,4,0,0),
d(1940,3,1,3,0,0),
d(1940,7,1,4,0,0),
d(1941,6,15,3,0,0),
d(1941,10,15,4,0,0),
d(1943,8,1,3,0,0),
d(1943,10,15,4,0,0),
d(1946,3,1,3,0,0),
d(1946,10,1,4,0,0),
d(1963,10,1,3,0,0),
d(1963,12,15,4,0,0),
d(1964,3,1,3,0,0),
d(1964,10,15,4,0,0),
d(1965,3,1,3,0,0),
d(1965,10,15,4,0,0),
d(1966,3,1,3,0,0),
d(1966,10,15,4,0,0),
d(1967,4,2,3,0,0),
d(1967,10,1,4,0,0),
d(1968,4,7,3,0,0),
d(1968,10,6,4,0,0),
d(1969,4,6,3,0,0),
d(1969,10,5,4,0,0),
d(1974,1,23,3,0,0),
d(1974,5,1,2,0,0),
d(1988,12,1,3,0,0),
d(1989,3,5,2,0,0),
d(1989,10,15,3,0,0),
d(1990,3,4,2,0,0),
d(1990,10,15,4,0,0),
d(1991,3,1,3,0,0),
d(1991,10,15,4,0,0),
d(1992,3,1,3,0,0),
d(1992,10,18,4,0,0),
d(1993,3,7,2,0,0),
d(1999,10,3,3,0,0),
d(2000,3,3,3,0,0),
d(2004,5,23,3,0,0),
d(2004,9,26,4,0,0),
]
_transition_info = [
i(-15420,0,'CMT'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-7200,7200,'ARST'),
i(-10800,0,'ART'),
i(-10800,0,'ARST'),
i(-10800,0,'ART'),
i(-14400,0,'WART'),
i(-10800,0,'ART'),
]
Mendoza = Mendoza()
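# A minimal usage sketch (not part of the original file): consumers normally
# reach this class through the pytz front end rather than importing it here.
#
#     from datetime import datetime
#     import pytz
#     tz = pytz.timezone('America/Mendoza')
#     # localize() resolves the UTC offset from the transition tables above
#     print(tz.localize(datetime(1990, 1, 1)))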
|
Lektorium-LLC/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/inputtypes.py
|
8
|
#
# File: courseware/capa/inputtypes.py
#
"""
Module containing the problem elements which render into input objects
- textline
- textbox (aka codeinput)
- schematic
- choicegroup (aka radiogroup, checkboxgroup)
- imageinput (for clickable image)
- optioninput (for option list)
- filesubmission (upload a file)
- crystallography
- vsepr_input
- drag_and_drop
- formulaequationinput
- chemicalequationinput
These are matched by *.html files templates/*.html which are mako templates with the
actual html.
Each input type takes the xml tree as 'element', the previous answer as 'value', and the
graded status as 'status'
"""
# TODO: make hints do something
# TODO: make all inputtypes actually render msg
# TODO: remove unused fields (e.g. 'hidden' in a few places)
# TODO: add validators so that content folks get better error messages.
# Possible todo: make inline the default for textlines and other "one-line" inputs. It probably
# makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a
# general css and layout strategy for capa, document it, then implement it.
import json
import logging
import re
import shlex # for splitting quoted strings
import sys
import time
from datetime import datetime
import bleach
import html5lib
import pyparsing
from lxml import etree
import xqueue_interface
from calc.preview import latex_preview
from capa.xqueue_interface import XQUEUE_TIMEOUT
from chem import chemcalc
from openedx.core.djangolib.markup import HTML, Text
from xmodule.stringify import stringify_children
from .registry import TagRegistry
from .util import sanitize_html
log = logging.getLogger(__name__)
#########################################################################
registry = TagRegistry() # pylint: disable=invalid-name
class Status(object):
"""
Problem status
attributes: classname, display_name, display_tooltip
"""
css_classes = {
# status: css class
'unsubmitted': 'unanswered',
'incomplete': 'incorrect',
'queued': 'processing',
}
__slots__ = ('classname', '_status', 'display_name', 'display_tooltip')
def __init__(self, status, gettext_func=unicode):
self.classname = self.css_classes.get(status, status)
_ = gettext_func
names = {
'correct': _('correct'),
'incorrect': _('incorrect'),
'partially-correct': _('partially correct'),
'incomplete': _('incomplete'),
'unanswered': _('unanswered'),
'unsubmitted': _('unanswered'),
'submitted': _('submitted'),
'queued': _('processing'),
}
tooltips = {
# Translators: these are tooltips that indicate the state of an assessment question
'correct': _('This answer is correct.'),
'incorrect': _('This answer is incorrect.'),
'partially-correct': _('This answer is partially correct.'),
'queued': _('This answer is being processed.'),
}
tooltips.update(
dict.fromkeys(
['incomplete', 'unanswered', 'unsubmitted'], _('Not yet answered.')
)
)
self.display_name = names.get(status, unicode(status))
self.display_tooltip = tooltips.get(status, u'')
self._status = status or ''
def __str__(self):
return self._status
def __unicode__(self):
return self._status.decode('utf8')
def __repr__(self):
return 'Status(%r)' % self._status
def __eq__(self, other):
return self._status == str(other)
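# A minimal sketch (not part of the module) of how Status resolves its display
# values; the identity lambda stands in for a real ugettext function:
#
#     s = Status('queued', gettext_func=lambda text: text)
#     s.classname       # 'processing' (mapped through css_classes)
#     s.display_name    # 'processing'
#     str(s)            # 'queued'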
class Attribute(object):
"""
Allows specifying required and optional attributes for input types.
"""
# want to allow default to be None, but also allow required objects
_sentinel = object()
def __init__(self, name, default=_sentinel, transform=None, validate=None, render=True):
"""
Define an attribute
name (str): the name of the attribute--should be alphanumeric (valid for an XML attribute)
default (any type): If not specified, this attribute is required. If specified, use this as the default value
if the attribute is not specified. Note that this value will not be transformed or validated.
transform (function str -> any type): If not None, will be called to transform the parsed value into an internal
representation.
validate (function str-or-return-type-of-transform -> unit or exception): If not None, called to validate the
(possibly transformed) value of the attribute. Should raise ValueError with a helpful message if
the value is invalid.
render (bool): if False, don't include this attribute in the template context.
"""
self.name = name
self.default = default
self.validate = validate
self.transform = transform
self.render = render
def parse_from_xml(self, element):
"""
Given an etree xml element that should have this attribute, do the obvious thing:
- look for it. raise ValueError if not found and required.
- transform and validate. pass through any exceptions from transform or validate.
"""
val = element.get(self.name)
if self.default == self._sentinel and val is None:
raise ValueError(
'Missing required attribute {0}.'.format(self.name)
)
if val is None:
# not required, so return default
return self.default
if self.transform is not None:
val = self.transform(val)
if self.validate is not None:
self.validate(val)
return val
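# A minimal sketch (not part of the module) of the required/optional split in
# parse_from_xml, using a hypothetical element:
#
#     from lxml import etree
#     elem = etree.XML('<textline size="40"/>')
#     Attribute('size', '20').parse_from_xml(elem)     # '40' (present in XML)
#     Attribute('inline', False).parse_from_xml(elem)  # False (default used)
#     Attribute('math').parse_from_xml(elem)           # ValueError (required)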
class InputTypeBase(object):
"""
Abstract base class for input types.
"""
template = None
def __init__(self, system, xml, state):
"""
Instantiate an InputType class. Arguments:
- system : LoncapaModule instance which provides OS, rendering, and user context.
Specifically, must have a render_template function.
- xml : Element tree of this Input element
- state : a dictionary with optional keys:
* 'value' -- the current value of this input
(what the student entered last time)
* 'id' -- the id of this input, typically
"{problem-location}_{response-num}_{input-num}"
* 'status' (submitted, unanswered, unsubmitted)
* 'input_state' -- dictionary containing any inputtype-specific state
that has been preserved
* 'feedback' (dictionary containing keys for hints, errors, or other
feedback from previous attempt. Specifically 'message', 'hint',
'hintmode'. If 'hintmode' is 'always', the hint is always displayed.)
"""
self.xml = xml
self.tag = xml.tag
self.capa_system = system
# NOTE: ID should only come from one place. If it comes from multiple,
# we use state first, XML second (in case the xml changed, but we have
# existing state with an old id). Since we don't make this guarantee,
# we can swap this around in the future if there's a more logical
# order.
self.input_id = state.get('id', xml.get('id'))
if self.input_id is None:
raise ValueError(
"input id state is None. xml is {0}".format(etree.tostring(xml))
)
self.value = state.get('value', '')
feedback = state.get('feedback', {})
self.msg = feedback.get('message', '')
self.hint = feedback.get('hint', '')
self.hintmode = feedback.get('hintmode', None)
self.input_state = state.get('input_state', {})
self.answervariable = state.get('answervariable', None)
self.response_data = state.get('response_data')
# put hint above msg if it should be displayed
if self.hintmode == 'always':
self.msg = HTML('{hint}<br/>{msg}' if self.msg else '{hint}').format(hint=HTML(self.hint),
msg=HTML(self.msg))
self.status = state.get('status', 'unanswered')
try:
# Pre-parse and process all the declared requirements.
self.process_requirements()
# Call subclass "constructor" -- means they don't have to worry about calling
# super().__init__, and are isolated from changes to the input
# constructor interface.
self.setup()
except Exception as err:
# Something went wrong: add xml to message, but keep the traceback
msg = u"Error in xml '{x}': {err} ".format(
x=etree.tostring(xml), err=err.message)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def get_attributes(cls):
"""
Should return a list of Attribute objects (see docstring there for details). Subclasses should override. e.g.
return [Attribute('unicorn', True), Attribute('num_dragons', 12, transform=int), ...]
"""
return []
def process_requirements(self):
"""
Subclasses can declare lists of required and optional attributes. This
function parses the input xml and pulls out those attributes. This
isolates most simple input types from needing to deal with xml parsing at all.
Processes attributes, putting the results in the self.loaded_attributes dictionary. Also creates a set
self.to_render, containing the names of attributes that should be included in the context by default.
"""
# Use local dicts and sets so that if there are exceptions, we don't
# end up in a partially-initialized state.
loaded = {}
to_render = set()
for attribute in self.get_attributes():
loaded[attribute.name] = attribute.parse_from_xml(self.xml)
if attribute.render:
to_render.add(attribute.name)
self.loaded_attributes = loaded
self.to_render = to_render
def setup(self):
"""
InputTypes should override this to do any needed initialization. It is called after the
constructor, so all base attributes will be set.
If this method raises an exception, it will be wrapped with a message that includes the
problem xml.
"""
pass
def handle_ajax(self, dispatch, data):
"""
InputTypes that need to handle specialized AJAX should override this.
Input:
dispatch: a string that can be used to determine how to handle the data passed in
data: a dictionary containing the data that was sent with the ajax call
Output:
a dictionary object that can be serialized into JSON. This will be sent back to the Javascript.
"""
pass
def _get_render_context(self):
"""
Should return a dictionary of keys needed to render the template for the input type.
(Separate from get_html to facilitate testing of logic separately from the rendering)
The default implementation gets the following rendering context: basic things like value, id, status, and msg,
as well as everything in self.loaded_attributes, and everything returned by self._extra_context().
This means that input types that only parse attributes and pass them to the template get everything they need,
and don't need to override this method.
"""
context = {
'id': self.input_id,
'value': self.value,
'status': Status(self.status, self.capa_system.i18n.ugettext),
'msg': self.msg,
'response_data': self.response_data,
'STATIC_URL': self.capa_system.STATIC_URL,
'describedby_html': HTML(''),
}
# Generate the list of ids to be used with the aria-describedby field.
descriptions = list()
# If there is trailing text, add the id as the first element to the list before adding the status id
if 'trailing_text' in self.loaded_attributes and self.loaded_attributes['trailing_text']:
trailing_text_id = 'trailing_text_' + self.input_id
descriptions.append(trailing_text_id)
# Every list should contain the status id
status_id = 'status_' + self.input_id
descriptions.append(status_id)
descriptions.extend(self.response_data.get('descriptions', {}).keys())
description_ids = ' '.join(descriptions)
context.update(
{'describedby_html': HTML('aria-describedby="{}"').format(description_ids)}
)
context.update(
(a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render
)
context.update(self._extra_context())
if self.answervariable:
context.update({'answervariable': self.answervariable})
return context
def _extra_context(self):
"""
Subclasses can override this to return extra context that should be passed to their templates for rendering.
This is useful when the input type requires computing new template variables from the parsed attributes.
"""
return {}
def get_html(self):
"""
Return the html for this input, as an etree element.
"""
if self.template is None:
raise NotImplementedError("no rendering template specified for class {0}"
.format(self.__class__))
context = self._get_render_context()
html = self.capa_system.render_template(self.template, context).strip()
try:
output = etree.XML(html)
except etree.XMLSyntaxError as ex:
# If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,
# the XML parser will raise an exception, so we fall back to html5parser, which will set empty "" values for such attrs.
try:
output = html5lib.parseFragment(html, treebuilder='lxml', namespaceHTMLElements=False)[0]
except IndexError:
raise ex
return output
def get_user_visible_answer(self, internal_answer):
"""
Given the internal representation of the answer provided by the user, return the representation of the answer
as the user saw it. Subclasses should override this method if and only if the internal represenation of the
answer is different from the answer that is displayed to the user.
"""
return internal_answer
#-----------------------------------------------------------------------------
@registry.register
class OptionInput(InputTypeBase):
"""
Input type for selecting an option from a list (rendered as a drop-down).
Example:
<optioninput options="('Up','Down')" correct="Up"/><text>The location of the sky</text>
# TODO: allow ordering to be randomized
"""
template = "optioninput.html"
tags = ['optioninput']
@staticmethod
def parse_options(options):
"""
Given options string, convert it into an ordered list of (option_id, option_description) tuples, where
id==description for now. TODO: make it possible to specify different id and descriptions.
"""
# convert single quotes inside option values to html encoded string
options = re.sub(r"([a-zA-Z])('|\\')([a-zA-Z])", r"\1'\3", options)
options = re.sub(r"\\'", r"'", options) # replace already escaped single quotes
# parse the set of possible options
lexer = shlex.shlex(options[1:-1].encode('utf8'))
lexer.quotes = "'"
# Allow options to be separated by whitespace as well as commas
lexer.whitespace = ", "
# remove quotes
# convert escaped single quotes (html encoded string) back to single quotes
tokens = [x[1:-1].decode('utf8').replace("&#39;", "'") for x in lexer]
# make list of (option_id, option_description), with description=id
return [(t, t) for t in tokens]
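# A minimal sketch (not part of the module) of parse_options on the example
# from the class docstring:
#
#     OptionInput.parse_options(u"('Up','Down')")
#     # -> [(u'Up', u'Up'), (u'Down', u'Down')]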
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [Attribute('options', transform=cls.parse_options),
Attribute('inline', False)]
def _extra_context(self):
"""
Return extra context.
"""
_ = self.capa_system.i18n.ugettext
return {'default_option_text': _('Select an option')}
#-----------------------------------------------------------------------------
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
# desired semantics.
@registry.register
class ChoiceGroup(InputTypeBase):
"""
Radio button or checkbox inputs: multiple choice or true/false
TODO: allow order of choices to be randomized, following lon-capa spec. Use
"location" attribute, ie random, top, bottom.
Example:
<choicegroup>
<choice correct="false" name="foil1">
<text>This is foil One.</text>
</choice>
<choice correct="false" name="foil2">
<text>This is foil Two.</text>
</choice>
<choice correct="true" name="foil3">
<text>This is foil Three.</text>
</choice>
</choicegroup>
"""
template = "choicegroup.html"
tags = ['choicegroup', 'radiogroup', 'checkboxgroup']
def setup(self):
i18n = self.capa_system.i18n
# suffix is '' or '[]' to change the way the input is handled--as a scalar or vector
# value. (VS: would be nice to make this less hackish).
if self.tag == 'choicegroup':
self.suffix = ''
self.html_input_type = "radio"
elif self.tag == 'radiogroup':
self.html_input_type = "radio"
self.suffix = '[]'
elif self.tag == 'checkboxgroup':
self.html_input_type = "checkbox"
self.suffix = '[]'
else:
_ = i18n.ugettext
# Translators: 'ChoiceGroup' is an input type and should not be translated.
msg = _("ChoiceGroup: unexpected tag {tag_name}").format(tag_name=self.tag)
raise Exception(msg)
self.choices = self.extract_choices(self.xml, i18n)
self._choices_map = dict(self.choices)
@classmethod
def get_attributes(cls):
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
return [Attribute("show_correctness", "always"),
Attribute("submitted_message", _("Answer received."))]
def _extra_context(self):
return {'input_type': self.html_input_type,
'choices': self.choices,
'name_array_suffix': self.suffix}
@staticmethod
def extract_choices(element, i18n):
"""
Extracts choices for a few input types, such as ChoiceGroup, RadioGroup and
CheckboxGroup.
returns list of (choice_name, choice_text) tuples
TODO: allow order of choices to be randomized, following lon-capa spec. Use
"location" attribute, ie random, top, bottom.
"""
choices = []
_ = i18n.ugettext
for choice in element:
if choice.tag == 'choice':
choices.append((choice.get("name"), stringify_children(choice)))
else:
if choice.tag != 'compoundhint':
msg = Text('[capa.inputtypes.extract_choices] {error_message}').format(
error_message=Text(
# Translators: '<choice>' and '<compoundhint>' are tag names and should not be translated.
_('Expected a <choice> or <compoundhint> tag; got {given_tag} instead')).format(
given_tag=choice.tag
)
)
raise Exception(msg)
return choices
def get_user_visible_answer(self, internal_answer):
if isinstance(internal_answer, basestring):
return self._choices_map[internal_answer]
return [self._choices_map[i] for i in internal_answer]
#-----------------------------------------------------------------------------
@registry.register
class JSInput(InputTypeBase):
"""
Inputtype for general javascript inputs. Intended to be used with
customresponse.
Loads in a sandboxed iframe to help prevent css and js conflicts between
frame and top-level window.
iframe sandbox whitelist:
- allow-scripts
- allow-popups
- allow-forms
- allow-pointer-lock
This in turn means that the iframe cannot directly access the top-level
window elements.
Example:
<jsinput html_file="/static/test.html"
gradefn="grade"
height="500"
width="400"/>
See the documentation in docs/data/source/course_data_formats/jsinput.rst
for more information.
"""
template = "jsinput.html"
tags = ['jsinput']
@classmethod
def get_attributes(cls):
"""
Register the attributes.
"""
return [
Attribute('params', None), # extra iframe params
Attribute('html_file', None),
Attribute('gradefn', "gradefn"),
Attribute('get_statefn', None), # Function to call in iframe
# to get current state.
Attribute('initial_state', None), # JSON string to be used as initial state
Attribute('set_statefn', None), # Function to call iframe to
# set state
Attribute('width', "400"), # iframe width
Attribute('height', "300"), # iframe height
# Title for the iframe, which should be supplied by the author of the problem. Not translated
# because we are in a class method and therefore do not have access to capa_system.i18n.
# Note that the default "display name" for the problem is also not translated.
Attribute('title', "Problem Remote Content"),
# SOP will be relaxed only if this attribute is set to false.
Attribute('sop', None)
]
def _extra_context(self):
context = {
'jschannel_loader': '{static_url}js/capa/src/jschannel.js'.format(
static_url=self.capa_system.STATIC_URL),
'jsinput_loader': '{static_url}js/capa/src/jsinput.js'.format(
static_url=self.capa_system.STATIC_URL),
'saved_state': self.value
}
return context
#-----------------------------------------------------------------------------
@registry.register
class TextLine(InputTypeBase):
"""
A text line input. Can do math preview if "math"="1" is specified.
If "trailing_text" is set to a value, then the textline will be shown with
the value after the text input, and before the checkmark or any input-specific
feedback. HTML will not work, but properly escaped HTML characters will. This
feature is useful if you would like to specify a specific type of units for the
text input.
If the hidden attribute is specified, the textline is hidden and the input id
is stored in a div with name equal to the value of the hidden attribute. This
is used e.g. for embedding simulations turned into questions.
Example:
<textline math="1" trailing_text="m/s"/>
This example will render out a text line with a math preview and the text 'm/s'
after the end of the text line.
"""
template = "textline.html"
tags = ['textline']
@classmethod
def get_attributes(cls):
"""
Register the attributes.
"""
return [
Attribute('size', None),
Attribute('hidden', False),
Attribute('inline', False),
# Attributes below used in setup(), not rendered directly.
Attribute('math', None, render=False),
# TODO: 'dojs' flag is temporary, for backwards compatibility with
# 8.02x
Attribute('dojs', None, render=False),
Attribute('preprocessorClassName', None, render=False),
Attribute('preprocessorSrc', None, render=False),
Attribute('trailing_text', ''),
]
def setup(self):
self.do_math = bool(self.loaded_attributes['math'] or
self.loaded_attributes['dojs'])
# TODO: do math checking using ajax instead of using js, so
# that we only have one math parser.
self.preprocessor = None
if self.do_math:
# Preprocessor to insert between raw input and Mathjax
self.preprocessor = {
'class_name': self.loaded_attributes['preprocessorClassName'],
'script_src': self.loaded_attributes['preprocessorSrc'],
}
if None in self.preprocessor.values():
self.preprocessor = None
def _extra_context(self):
return {'do_math': self.do_math,
'preprocessor': self.preprocessor, }
#-----------------------------------------------------------------------------
@registry.register
class FileSubmission(InputTypeBase):
"""
Upload some files (e.g. for programming assignments)
"""
template = "filesubmission.html"
tags = ['filesubmission']
@staticmethod
def parse_files(files):
"""
Given a string like 'a.py b.py c.out', split on whitespace and return as a json list.
"""
return json.dumps(files.split())
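# A minimal sketch (not part of the module):
#
#     FileSubmission.parse_files('a.py b.py c.out')
#     # -> '["a.py", "b.py", "c.out"]'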
@classmethod
def get_attributes(cls):
"""
Convert the list of allowed files to a convenient format.
"""
return [Attribute('allowed_files', '[]', transform=cls.parse_files),
Attribute('required_files', '[]', transform=cls.parse_files), ]
def setup(self):
"""
Do some magic to handle queueing status (render as "queued" instead of "incomplete"),
pull queue_len from the msg field. (TODO: get rid of the queue_len hack).
"""
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Your files have been submitted. As soon as your submission is"
" graded, this message will be replaced with the grader's feedback.")
self.submitted_msg = submitted_msg
# Check if problem has been queued
self.queue_len = 0
# Flag indicating that the problem has been queued, 'msg' is length of
# queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = self.submitted_msg
def _extra_context(self):
return {'queue_len': self.queue_len, }
#-----------------------------------------------------------------------------
@registry.register
class CodeInput(InputTypeBase):
"""
A text area input for code--uses codemirror, does syntax highlighting, special tab handling,
etc.
"""
template = "codeinput.html"
tags = [
'codeinput',
'textbox',
# Another (older) name--at some point we may want to make it use a
# non-codemirror editor.
]
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [
Attribute('rows', '30'),
Attribute('cols', '80'),
Attribute('hidden', ''),
# For CodeMirror
Attribute('mode', 'python'),
Attribute('linenumbers', 'true'),
# Template expects tabsize to be an int it can do math with
Attribute('tabsize', 4, transform=int),
]
def setup_code_response_rendering(self):
"""
Implement special logic: handle queueing state, and default input.
"""
# if no student input yet, then use the default input given by the
# problem
if not self.value and self.xml.text:
self.value = self.xml.text.strip()
# Check if problem has been queued
self.queue_len = 0
# Flag indicating that the problem has been queued, 'msg' is length of
# queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = bleach.clean(self.submitted_msg)
def setup(self):
""" setup this input type """
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Your answer has been submitted. As soon as your submission is"
" graded, this message will be replaced with the grader's feedback.")
self.submitted_msg = submitted_msg
self.setup_code_response_rendering()
def _extra_context(self):
"""
Define queue_len, aria_label and code mirror exit message context variables
"""
_ = self.capa_system.i18n.ugettext
return {
'queue_len': self.queue_len,
'aria_label': _('{programming_language} editor').format(
programming_language=self.loaded_attributes.get('mode')
),
'code_mirror_exit_message': _('Press ESC then TAB or click outside of the code editor to exit')
}
#-----------------------------------------------------------------------------
@registry.register
class MatlabInput(CodeInput):
"""
InputType for handling Matlab code input
Example:
<matlabinput rows="10" cols="80" tabsize="4">
Initial Text
</matlabinput>
"""
template = "matlabinput.html"
tags = ['matlabinput']
def setup(self):
"""
Handle matlab-specific parsing
"""
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Submitted. As soon as a response is returned, "
"this message will be replaced by that feedback.")
self.submitted_msg = submitted_msg
self.setup_code_response_rendering()
xml = self.xml
self.plot_payload = xml.findtext('./plot_payload')
# Check if problem has been queued
self.queuename = 'matlab'
self.queue_msg = ''
# this is only set if we don't have a graded response
# the graded response takes precedence
if 'queue_msg' in self.input_state and self.status in ['queued', 'incomplete', 'unsubmitted']:
self.queue_msg = sanitize_html(self.input_state['queue_msg'])
if 'queuestate' in self.input_state and self.input_state['queuestate'] == 'queued':
self.status = 'queued'
self.queue_len = 1
self.msg = self.submitted_msg
# Handle the situation where no response from xqueue arrived within the specified time.
if ('queuetime' not in self.input_state or
time.time() - self.input_state['queuetime'] > XQUEUE_TIMEOUT):
self.queue_len = 0
self.status = 'unsubmitted'
self.msg = _(
'No response from Xqueue within {xqueue_timeout} seconds. Aborted.'
).format(xqueue_timeout=XQUEUE_TIMEOUT)
def handle_ajax(self, dispatch, data):
"""
Handle AJAX calls directed to this input
Args:
- dispatch (str) - indicates how we want this ajax call to be handled
- data (dict) - dictionary of key-value pairs that contain useful data
Returns:
dict - 'success' - whether or not we successfully queued this submission
- 'message' - message to be rendered in case of error
"""
if dispatch == 'plot':
return self._plot_data(data)
return {}
def ungraded_response(self, queue_msg, queuekey):
"""
Handle the response from the XQueue
Stores the response in the input_state so it can be rendered later
Args:
- queue_msg (str) - message returned from the queue. The message to be rendered
- queuekey (str) - a key passed to the queue. Will be matched up to verify that this is the response we're waiting for
Returns:
nothing
"""
# check the queuekey against the saved queuekey
if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'
and self.input_state['queuekey'] == queuekey):
msg = self._parse_data(queue_msg)
# save the queue message so that it can be rendered later
self.input_state['queue_msg'] = msg
self.input_state['queuestate'] = None
self.input_state['queuekey'] = None
def button_enabled(self):
""" Return whether or not we want the 'Test Code' button visible
Right now, we only want this button to show up when a problem has not been
checked.
"""
if self.status in ['correct', 'incorrect', 'partially-correct']:
return False
else:
return True
def _extra_context(self):
""" Set up additional context variables"""
_ = self.capa_system.i18n.ugettext
queue_msg = self.queue_msg
if len(self.queue_msg) > 0: # An empty string cannot be parsed as XML but is okay to include in the template.
try:
etree.XML(HTML(u'<div>{0}</div>').format(HTML(self.queue_msg)))
except etree.XMLSyntaxError:
try:
html5lib.parseFragment(self.queue_msg, treebuilder='lxml', namespaceHTMLElements=False)[0]
except (IndexError, ValueError):
# If neither can parse queue_msg, it contains invalid xml.
queue_msg = HTML("<span>{0}</span>").format(_("Error running code."))
extra_context = {
'queue_len': str(self.queue_len),
'queue_msg': queue_msg,
'button_enabled': self.button_enabled(),
'matlab_editor_js': '{static_url}js/vendor/CodeMirror/octave.js'.format(
static_url=self.capa_system.STATIC_URL),
'msg': sanitize_html(self.msg) # sanitize msg before rendering into template
}
return extra_context
def _parse_data(self, queue_msg):
"""
Parses the message out of the queue message
Args:
queue_msg (str) - a JSON encoded string
Returns:
returns the value for the key 'msg' in queue_msg
"""
try:
result = json.loads(queue_msg)
except (TypeError, ValueError):
log.error("External message should be a JSON serialized dict."
" Received queue_msg = %s", queue_msg)
raise
msg = result['msg']
return msg
def _plot_data(self, data):
"""
AJAX handler for the plot button
Args:
data (dict) - should have key 'submission' which contains the student submission
Returns:
dict - 'success' - whether or not we successfully queued this submission
- 'message' - message to be rendered in case of error
"""
_ = self.capa_system.i18n.ugettext
# only send data if xqueue exists
if self.capa_system.xqueue is None:
return {'success': False, 'message': _('Cannot connect to the queue')}
# pull relevant info out of get
response = data['submission']
# construct xqueue headers
qinterface = self.capa_system.xqueue['interface']
qtime = datetime.utcnow().strftime(xqueue_interface.dateformat)
callback_url = self.capa_system.xqueue['construct_callback']('ungraded_response')
anonymous_student_id = self.capa_system.anonymous_student_id
# TODO: Why is this using self.capa_system.seed when we have self.seed???
queuekey = xqueue_interface.make_hashkey(str(self.capa_system.seed) + qtime +
anonymous_student_id +
self.input_id)
xheader = xqueue_interface.make_xheader(
lms_callback_url=callback_url,
lms_key=queuekey,
queue_name=self.queuename)
# construct xqueue body
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime
}
contents = {
'grader_payload': self.plot_payload,
'student_info': json.dumps(student_info),
'student_response': response,
'token': getattr(self.capa_system, 'matlab_api_key', None),
'endpoint_version': "2",
'requestor_id': anonymous_student_id,
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# save the input state if successful
if error == 0:
self.input_state['queuekey'] = queuekey
self.input_state['queuestate'] = 'queued'
self.input_state['queuetime'] = time.time()
return {'success': error == 0, 'message': msg}
#-----------------------------------------------------------------------------
@registry.register
class Schematic(InputTypeBase):
"""
InputType for the schematic editor
"""
template = "schematicinput.html"
tags = ['schematic']
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [
Attribute('height', None),
Attribute('width', None),
Attribute('parts', None),
Attribute('analyses', None),
Attribute('initial_value', None),
Attribute('submit_analyses', None),
]
def _extra_context(self):
context = {
'setup_script': '{static_url}js/capa/schematicinput.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class ImageInput(InputTypeBase):
"""
Clickable image as an input field. Element should specify the image source, height,
and width, e.g.
<imageinput src="/static/Figures/Skier-conservation-of-energy.jpg" width="388" height="560" />
TODO: showanswer for imageinput does not work yet - need javascript to put rectangle
over acceptable area of image.
"""
template = "imageinput.html"
tags = ['imageinput']
@classmethod
def get_attributes(cls):
"""
Note: src, height, and width are all required.
"""
return [Attribute('src'),
Attribute('height'),
Attribute('width'), ]
def setup(self):
"""
if value is of the form [x,y] then parse it and send along coordinates of previous answer
"""
m = re.match(r'\[([0-9]+),([0-9]+)]',
self.value.strip().replace(' ', ''))
if m:
# Note: we subtract 15 to compensate for the size of the dot on the screen.
# (is a 30x30 image--lms/static/images/green-pointer.png).
(self.gx, self.gy) = [int(x) - 15 for x in m.groups()]
else:
(self.gx, self.gy) = (0, 0)
def _extra_context(self):
return {'gx': self.gx,
'gy': self.gy}
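# A minimal worked example (not part of the module) of the coordinate parsing
# in setup(): a stored value of '[123, 456]' matches the regex after the
# whitespace strip, and the 15-pixel pointer-size correction gives
# gx, gy = 108, 441.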
#-----------------------------------------------------------------------------
@registry.register
class Crystallography(InputTypeBase):
"""
An input for crystallography -- user selects 3 points on the axes, and we get a plane.
TODO: what's the actual value format?
"""
template = "crystallography.html"
tags = ['crystallography']
@classmethod
def get_attributes(cls):
"""
Note: height, width are required.
"""
return [Attribute('height'),
Attribute('width'),
]
# -------------------------------------------------------------------------
@registry.register
class VseprInput(InputTypeBase):
"""
Input for molecular geometry--show possible structures, let student
pick structure and label positions with atoms or electron pairs.
"""
template = 'vsepr_input.html'
tags = ['vsepr_input']
@classmethod
def get_attributes(cls):
"""
Note: height, width, molecules and geometries are required.
"""
return [Attribute('height'),
Attribute('width'),
Attribute('molecules'),
Attribute('geometries'),
]
#-------------------------------------------------------------------------
@registry.register
class ChemicalEquationInput(InputTypeBase):
"""
An input type for entering chemical equations. Supports live preview.
Example:
<chemicalequationinput size="50"/>
options: size -- width of the textbox.
"""
template = "chemicalequationinput.html"
tags = ['chemicalequationinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [Attribute('size', '20'), ]
def _extra_context(self):
"""
TODO (vshnayder): Get rid of this once we have a standard way of requiring js to be loaded.
"""
return {
'previewer': '{static_url}js/capa/chemical_equation_preview.js'.format(
static_url=self.capa_system.STATIC_URL),
}
def handle_ajax(self, dispatch, data):
"""
Since chemcalc preview is the only AJAX handler for this input, check
whether the dispatch matches and forward the data if it does.
"""
if dispatch == 'preview_chemcalc':
return self.preview_chemcalc(data)
return {}
def preview_chemcalc(self, data):
"""
Render an html preview of a chemical formula or equation. `data` should
contain a key 'formula' whose value is the formula string.
Returns a json dictionary:
{
'preview' : 'the-preview-html' or ''
'error' : 'the-error' or ''
}
"""
_ = self.capa_system.i18n.ugettext
result = {'preview': '',
'error': ''}
try:
formula = data['formula']
except KeyError:
result['error'] = _("No formula specified.")
return result
try:
result['preview'] = chemcalc.render_to_html(formula)
except pyparsing.ParseException as err:
result['error'] = _("Couldn't parse formula: {error_msg}").format(error_msg=err.msg)
except Exception:
# this is unexpected, so log
log.warning(
"Error while previewing chemical formula", exc_info=True)
result['error'] = _("Error while rendering preview")
return result
#-------------------------------------------------------------------------
@registry.register
class FormulaEquationInput(InputTypeBase):
"""
An input type for entering formula equations. Supports live preview.
Example:
<formulaequationinput size="50"/>
options: size -- width of the textbox.
trailing_text -- text to show after the input textbox when
rendered, same as textline (useful for units)
"""
template = "formulaequationinput.html"
tags = ['formulaequationinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [
Attribute('size', '20'),
Attribute('inline', False),
Attribute('trailing_text', ''),
]
def _extra_context(self):
"""
TODO (vshnayder): Get rid of 'previewer' once we have a standard way of requiring js to be loaded.
"""
# `reported_status` is basically `status`, except we say 'unanswered'
return {
'previewer': '{static_url}js/capa/src/formula_equation_preview.js'.format(
static_url=self.capa_system.STATIC_URL),
}
def handle_ajax(self, dispatch, get):
"""
Since formcalc preview is the only AJAX handler for this input, check
whether the dispatch matches and forward the request if it does.
"""
if dispatch == 'preview_formcalc':
return self.preview_formcalc(get)
return {}
def preview_formcalc(self, get):
"""
Render a preview of a formula or equation. `get` should
contain a key 'formula' with a math expression.
Returns a json dictionary:
{
'preview' : '<some latex>' or ''
'error' : 'the-error' or ''
'request_start' : <time sent with request>
}
"""
_ = self.capa_system.i18n.ugettext
result = {'preview': '',
'error': ''}
try:
formula = get['formula']
except KeyError:
result['error'] = _("No formula specified.")
return result
result['request_start'] = int(get.get('request_start', 0))
try:
# TODO add references to valid variables and functions
# At some point, we might want to mark invalid variables as red
# or something, and this is where we would need to pass those in.
result['preview'] = latex_preview(formula)
except pyparsing.ParseException:
result['error'] = _("Sorry, couldn't parse formula")
result['formula'] = formula
except Exception:
# this is unexpected, so log
log.warning(
"Error while previewing formula", exc_info=True
)
result['error'] = _("Error while rendering preview")
return result
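# A minimal sketch of the formula preview round trip (illustrative; `inp`
# stands for an already-configured FormulaEquationInput instance):
#   response = inp.handle_ajax('preview_formcalc',
#                              {'formula': 'x^2 + y', 'request_start': 42})
#   # response == {'preview': '<some latex>', 'error': '', 'request_start': 42}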
#-----------------------------------------------------------------------------
@registry.register
class DragAndDropInput(InputTypeBase):
"""
Input for drag and drop problems. Allows student to drag and drop images and
labels to base image.
"""
template = 'drag_and_drop_input.html'
tags = ['drag_and_drop_input']
def setup(self):
def parse(tag, tag_type):
"""Parses <tag ... /> xml element to dictionary. Stores
'draggable' and 'target' tags with attributes to dictionary and
returns last.
Args:
tag: xml etree element <tag...> with attributes
tag_type: 'draggable' or 'target'.
                If tag_type is 'draggable': all attributes except id
                (label, icon, can_reuse) are optional.
                If tag_type is 'target': all attributes (id, x, y, w, h)
                are required. (x, y) - coordinates of the center of the target,
                w, h - width and height of the target.
Returns:
                Dictionary of attribute values:
                dict{'id': smth, 'label': smth, 'icon': smth,
                'can_reuse': smth}.
"""
tag_attrs = dict()
tag_attrs['draggable'] = {
'id': Attribute._sentinel,
'label': "", 'icon': "",
'can_reuse': ""
}
tag_attrs['target'] = {
'id': Attribute._sentinel,
'x': Attribute._sentinel,
'y': Attribute._sentinel,
'w': Attribute._sentinel,
'h': Attribute._sentinel
}
dic = dict()
for attr_name in tag_attrs[tag_type].keys():
dic[attr_name] = Attribute(attr_name,
default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag)
if tag_type == 'draggable' and not self.no_labels:
dic['label'] = dic['label'] or dic['id']
if tag_type == 'draggable':
dic['target_fields'] = [parse(target, 'target') for target in
tag.iterchildren('target')]
return dic
# add labels to images?:
self.no_labels = Attribute('no_labels',
default="False").parse_from_xml(self.xml)
to_js = dict()
# image drag and drop onto
to_js['base_image'] = Attribute('img').parse_from_xml(self.xml)
        # outline places on the image where to drag and drop
to_js['target_outline'] = Attribute('target_outline',
default="False").parse_from_xml(self.xml)
# one draggable per target?
to_js['one_per_target'] = Attribute('one_per_target',
default="True").parse_from_xml(self.xml)
# list of draggables
to_js['draggables'] = [parse(draggable, 'draggable') for draggable in
self.xml.iterchildren('draggable')]
# list of targets
to_js['targets'] = [parse(target, 'target') for target in
self.xml.iterchildren('target')]
# custom background color for labels:
label_bg_color = Attribute('label_bg_color',
default=None).parse_from_xml(self.xml)
if label_bg_color:
to_js['label_bg_color'] = label_bg_color
self.loaded_attributes['drag_and_drop_json'] = json.dumps(to_js)
self.to_render.add('drag_and_drop_json')
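    # A minimal sketch of the xml that setup() parses (illustrative values):
    #   <drag_and_drop_input img="/static/images/cow.png" target_outline="true">
    #       <draggable id="1" label="Label 1"/>
    #       <draggable id="2" label="Label 2" can_reuse="true"/>
    #       <target id="t1" x="20" y="20" w="90" h="90"/>
    #   </drag_and_drop_input>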
#-------------------------------------------------------------------------
@registry.register
class EditAMoleculeInput(InputTypeBase):
"""
An input type for edit-a-molecule. Integrates with the molecule editor java applet.
Example:
<editamolecule size="50"/>
options: size -- width of the textbox.
"""
template = "editamolecule.html"
tags = ['editamoleculeinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [Attribute('file'),
Attribute('missing', None)]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/editamolecule.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class DesignProtein2dInput(InputTypeBase):
"""
An input type for design of a protein in 2D. Integrates with the Protex java applet.
Example:
    <designprotein2d width="800" height="500" target_shape="E;NE;NW;W;SW;E;none" />
"""
template = "designprotein2dinput.html"
tags = ['designprotein2dinput']
@classmethod
def get_attributes(cls):
"""
        Note: width, height, and target_shape are required.
"""
return [Attribute('width'),
Attribute('height'),
Attribute('target_shape')
]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/design-protein-2d.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class EditAGeneInput(InputTypeBase):
"""
An input type for editing a gene.
Integrates with the genex GWT application.
Example:
<editagene genex_dna_sequence="CGAT" genex_problem_number="1"/>
"""
template = "editageneinput.html"
tags = ['editageneinput']
@classmethod
def get_attributes(cls):
"""
        Note: genex_dna_sequence and genex_problem_number are required.
"""
return [Attribute('genex_dna_sequence'),
Attribute('genex_problem_number')
]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/edit-a-gene.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#---------------------------------------------------------------------
@registry.register
class AnnotationInput(InputTypeBase):
"""
Input type for annotations: students can enter some notes or other text
    (currently ungraded), and then choose from a set of tags/options, which are graded.
Example:
<annotationinput>
<title>Annotation Exercise</title>
<text>
They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking
[phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras]
</text>
<comment>Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing?</comment>
<comment_prompt>Type a commentary below:</comment_prompt>
<tag_prompt>Select one tag:</tag_prompt>
<options>
<option choice="correct">ate - both a cause and an effect</option>
<option choice="incorrect">ate - a cause</option>
<option choice="partially-correct">ate - an effect</option>
</options>
</annotationinput>
# TODO: allow ordering to be randomized
"""
template = "annotationinput.html"
tags = ['annotationinput']
def setup(self):
xml = self.xml
self.debug = False # set to True to display extra debug info with input
self.return_to_annotation = True # return only works in conjunction with annotatable xmodule
self.title = xml.findtext('./title', 'Annotation Exercise')
self.text = xml.findtext('./text')
self.comment = xml.findtext('./comment')
self.comment_prompt = xml.findtext(
'./comment_prompt', 'Type a commentary below:')
self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:')
self.options = self._find_options()
# Need to provide a value that JSON can parse if there is no
# student-supplied value yet.
if self.value == '':
self.value = 'null'
self._validate_options()
def _find_options(self):
""" Returns an array of dicts where each dict represents an option. """
elements = self.xml.findall('./options/option')
return [{
'id': index,
'description': option.text,
'choice': option.get('choice')
} for (index, option) in enumerate(elements)]
def _validate_options(self):
""" Raises a ValueError if the choice attribute is missing or invalid. """
valid_choices = ('correct', 'partially-correct', 'incorrect')
for option in self.options:
choice = option['choice']
if choice is None:
raise ValueError('Missing required choice attribute.')
elif choice not in valid_choices:
raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(
choice, ', '.join(valid_choices)))
def _unpack(self, json_value):
""" Unpacks the json input state into a dict. """
d = json.loads(json_value)
if not isinstance(d, dict):
d = {}
comment_value = d.get('comment', '')
if not isinstance(comment_value, basestring):
comment_value = ''
options_value = d.get('options', [])
if not isinstance(options_value, list):
options_value = []
return {
'options_value': options_value,
'has_options_value': len(options_value) > 0, # for convenience
'comment_value': comment_value,
}
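    # A minimal sketch of what _unpack produces (illustrative):
    #   self._unpack('{"comment": "interesting", "options": [0]}')
    #   # => {'options_value': [0], 'has_options_value': True,
    #   #     'comment_value': 'interesting'}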
def _extra_context(self):
extra_context = {
'title': self.title,
'text': self.text,
'comment': self.comment,
'comment_prompt': self.comment_prompt,
'tag_prompt': self.tag_prompt,
'options': self.options,
'return_to_annotation': self.return_to_annotation,
'debug': self.debug
}
extra_context.update(self._unpack(self.value))
return extra_context
@registry.register
class ChoiceTextGroup(InputTypeBase):
r"""
Groups of radiobutton/checkboxes with text inputs.
Examples:
RadioButton problem
<problem>
<startouttext/>
A person rolls a standard die 100 times and records the results.
On the first roll they received a "1". Given this information
select the correct choice and fill in numbers to make it accurate.
<endouttext/>
<choicetextresponse>
<radiotextgroup>
<choice correct="false">The lowest number rolled was:
<decoy_input/> and the highest number rolled was:
<decoy_input/> .</choice>
<choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/>
and there is not enough information to determine the highest number rolled.
</choice>
<choice correct="false">There is not enough information to determine the lowest
number rolled, and the highest number rolled was:
<decoy_input/> .
</choice>
</radiotextgroup>
</choicetextresponse>
</problem>
CheckboxProblem:
<problem>
<startouttext/>
A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\) , 2, 3, 4 ,5 ,6
and records the results. The first number they pick is \(\sqrt{2}\) Given this information
select the correct choices and fill in numbers to make them accurate.
<endouttext/>
<choicetextresponse>
<checkboxtextgroup>
<choice correct="true">
The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
</choice>
<choice correct="false">
The highest number selected was <decoy_input/> .
</choice>
<choice correct="true">There is not enough information given to determine the highest number
which was selected.
</choice>
<choice correct="false">There is not enough information given to determine the lowest number
selected.
</choice>
</checkboxtextgroup>
</choicetextresponse>
</problem>
In the preceding examples the <decoy_input/> is used to generate a textinput html element
in the problem's display. Since it is inside of an incorrect choice, no answer given
for it will be correct, and thus specifying an answer for it is not needed.
"""
template = "choicetext.html"
tags = ['radiotextgroup', 'checkboxtextgroup']
def setup(self):
"""
Performs setup for the initial rendering of the problem.
`self.html_input_type` determines whether this problem is displayed
with radiobuttons or checkboxes
If the initial value of `self.value` is '' change it to {} so that
the template has an empty dictionary to work with.
        Sets the value of `self.choices` to the return value of
        `self.extract_choices`.
"""
self.text_input_values = {}
if self.tag == 'radiotextgroup':
self.html_input_type = "radio"
elif self.tag == 'checkboxtextgroup':
self.html_input_type = "checkbox"
else:
_ = self.capa_system.i18n.ugettext
msg = _("{input_type}: unexpected tag {tag_name}").format(
input_type="ChoiceTextGroup", tag_name=self.tag
)
raise Exception(msg)
if self.value == '':
# Make `value` an empty dictionary, if it currently has an empty
# value. This is necessary because the template expects a
# dictionary.
self.value = {}
self.choices = self.extract_choices(self.xml, self.capa_system.i18n)
@classmethod
def get_attributes(cls):
"""
Returns a list of `Attribute` for this problem type
"""
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
return [
Attribute("show_correctness", "always"),
Attribute("submitted_message", _("Answer received.")),
]
def _extra_context(self):
"""
Returns a dictionary of extra content necessary for rendering this InputType.
`input_type` is either 'radio' or 'checkbox' indicating whether the choices for
this problem will have radiobuttons or checkboxes.
"""
return {
'input_type': self.html_input_type,
'choices': self.choices
}
@staticmethod
def extract_choices(element, i18n):
"""
Extracts choices from the xml for this problem type.
        If we have xml as follows (choice names will have been assigned
        by now):
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
Choices are used for rendering the problem properly
The function will setup choices as follows:
choices =[
("1_2_1_choiceinput_0bc",
[{'type': 'text', 'contents': "The number", 'tail_text': '',
'value': ''
},
{'type': 'textinput',
'contents': "1_2_1_choiceinput0_numtolerance_input_0",
'tail_text': 'Is the mean of the list',
'value': ''
}
]
),
("1_2_1_choiceinput_1bc",
[{'type': 'text', 'contents': "False demonstration choice",
'tail_text': '',
'value': ''
}
]
)
]
"""
_ = i18n.ugettext
choices = []
for choice in element:
if choice.tag != 'choice':
msg = Text("[capa.inputtypes.extract_choices] {0}").format(
# Translators: a "tag" is an XML element, such as "<b>" in HTML
Text(_("Expected a {expected_tag} tag; got {given_tag} instead")).format(
expected_tag="<choice>",
given_tag=choice.tag,
)
)
raise Exception(msg)
components = []
choice_text = ''
if choice.text is not None:
choice_text += choice.text
# Initialize our dict for the next content
adder = {
'type': 'text',
'contents': choice_text,
'tail_text': '',
'value': ''
}
components.append(adder)
for elt in choice:
# for elements in the choice e.g. <text> <numtolerance_input>
adder = {
'type': 'text',
'contents': '',
'tail_text': '',
'value': ''
}
tag_type = elt.tag
                # If the current `elt` is a <numtolerance_input>, set the
                # `adder` type to 'textinput' and 'contents' to
                # the `elt`'s name.
# Treat decoy_inputs and numtolerance_inputs the same in order
# to prevent students from reading the Html and figuring out
# which inputs are valid
if tag_type in ('numtolerance_input', 'decoy_input'):
# We set this to textinput, so that we get a textinput html
# element.
adder['type'] = 'textinput'
adder['contents'] = elt.get('name')
else:
adder['contents'] = elt.text
                # Add any tail text ("Is the mean of the list" in the example)
adder['tail_text'] = elt.tail if elt.tail else ''
components.append(adder)
# Add the tuple for the current choice to the list of choices
choices.append((choice.get("name"), components))
return choices
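# A minimal sketch of calling extract_choices directly (illustrative; a
# `system` object providing i18n is assumed):
#   from lxml import etree
#   xml = etree.XML('<radiotextgroup>'
#                   '<choice correct="true" name="c0">Pick me</choice>'
#                   '</radiotextgroup>')
#   choices = ChoiceTextGroup.extract_choices(xml, system.i18n)
#   # => [('c0', [{'type': 'text', 'contents': 'Pick me',
#   #              'tail_text': '', 'value': ''}])]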
|
pyconca/2013-web
|
refs/heads/pyconca2013
|
symposion/cms/admin.py
|
7
|
from django.contrib import admin
import reversion
from .models import Page
class PageAdmin(reversion.VersionAdmin):
pass
admin.site.register(Page, PageAdmin)
|
alvarofierroclavero/scikit-learn
|
refs/heads/master
|
sklearn/tree/tests/test_tree.py
|
72
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
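# Typical use of the helper above (illustrative): grow the same estimator on
# dense and on sparse input and check that the resulting trees agree.
#   d = DecisionTreeClassifier(random_state=0).fit(X, y)
#   s = DecisionTreeClassifier(random_state=0).fit(csc_matrix(X), y)
#   assert_tree_equal(d.tree_, s.tree_, "DecisionTreeClassifier")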
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
        # using fewer features degrades the learning ability of this tree,
        # but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Save testing time by subsampling the larger datasets
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
|
provaleks/o8
|
refs/heads/8.0
|
addons/point_of_sale/test/test_frontend.py
|
309
|
import openerp.tests
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestUi(openerp.tests.HttpCase):
def test_01_pos_basic_order(self):
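        # phantom_js drives a headless browser: after logging in it loads the
        # url, runs the given javascript (here, starting the 'pos_basic_order'
        # tour) and waits until the readiness expression evaluates to true.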
self.phantom_js("/", "openerp.Tour.run('pos_basic_order', 'test')", "openerp.Tour.tours.pos_basic_order", login="admin")
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-web/azure/mgmt/web/models/dimension.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Dimension(Model):
"""Dimension of a resource metric. For e.g. instance specific HTTP requests
for a web app,
where instance name is dimension of the metric HTTP request.
:param name:
:type name: str
:param display_name:
:type display_name: str
:param internal_name:
:type internal_name: str
:param to_be_exported_for_shoebox:
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(self, name=None, display_name=None, internal_name=None, to_be_exported_for_shoebox=None):
super(Dimension, self).__init__()
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
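# A minimal usage sketch (illustrative values):
#   dim = Dimension(name='Instance', display_name='Instance',
#                   internal_name='instance', to_be_exported_for_shoebox=True)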
|
aboSamoor/polyglot
|
refs/heads/master
|
polyglot/mapping/__init__.py
|
7
|
from .base import CountedVocabulary, OrderedVocabulary, VocabularyBase
from .embeddings import Embedding
from .expansion import CaseExpander, DigitExpander
__all__ = ['CountedVocabulary',
'OrderedVocabulary',
'VocabularyBase',
'Embedding',
'CaseExpander',
'DigitExpander']
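# Example (illustrative): the re-exports above allow
#   from polyglot.mapping import Embedding, CaseExpander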
|
lfz/Guided-Denoise
|
refs/heads/master
|
Attackset/Iter4_ensv3_resv2_inresv2_random/nets/inception_v1.py
|
28
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
    A tuple of the final endpoint tensor and a dictionary from components of
    the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
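          # Note: this scope name ('Conv2d_0a_3x3' rather than the
          # 'Conv2d_0b_3x3' pattern used elsewhere) matches the upstream
          # slim code, presumably kept for checkpoint compatibility.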
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
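# Hedged usage sketch for the base network (assumes TF 1.x with
# tf.contrib.slim; the placeholder shape is illustrative):
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   net, end_points = inception_v1_base(images, final_endpoint='Mixed_4f')
#   # `net` is the Mixed_4f activation; `end_points` maps every endpoint
#   # name up to 'Mixed_4f' to its tensor.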
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
    is_training: whether the model is being trained.
    dropout_keep_prob: the fraction of activation values that is retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is of
        shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
net = slim.dropout(net,
dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
inception_v1_arg_scope = inception_utils.inception_arg_scope
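# Hedged end-to-end sketch (illustrative; 1001 classes matches the slim
# ImageNet checkpoints, 1000 labels plus background):
#   with slim.arg_scope(inception_v1_arg_scope()):
#       logits, end_points = inception_v1(images, num_classes=1001,
#                                         is_training=False)
#   probabilities = end_points['Predictions']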
|
dlazz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ovs/openvswitch_bridge.py
|
75
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
# Portions copyright @ 2015 VMware, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: openvswitch_bridge
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch bridges
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch bridges
options:
bridge:
required: true
description:
- Name of bridge or fake bridge to manage
parent:
version_added: "2.3"
description:
- Bridge parent of the fake bridge to manage
vlan:
version_added: "2.3"
description:
- The VLAN id of the fake bridge to manage (must be between 0 and
4095). This parameter is required if I(parent) parameter is set.
state:
default: "present"
choices: [ present, absent ]
description:
- Whether the bridge should exist
timeout:
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
description:
- A dictionary of external-ids. Omitting this parameter is a No-op.
To clear all external-ids pass an empty value.
fail_mode:
version_added: 2.0
        choices: [secure, standalone]
description:
- Set bridge fail-mode. The default value (None) is a No-op.
set:
version_added: 2.3
description:
- Run set command after bridge configuration. This parameter is
non-idempotent, play will always return I(changed) state if
present
'''
EXAMPLES = '''
# Create a bridge named br-int
- openvswitch_bridge:
bridge: br-int
state: present
# Create a fake bridge named br-int within br-parent on the VLAN 405
- openvswitch_bridge:
bridge: br-int
parent: br-parent
vlan: 405
state: present
# Create an integration bridge
- openvswitch_bridge:
bridge: br-int
state: present
fail_mode: secure
args:
external_ids:
bridge-id: br-int
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def _fail_mode_to_str(text):
if not text:
return None
else:
return text.strip()
def _external_ids_to_dict(text):
if not text:
return None
else:
d = {}
for l in text.splitlines():
if l:
k, v = l.split('=')
d[k] = v
return d
def map_obj_to_commands(want, have, module):
commands = list()
if module.params['state'] == 'absent':
if have:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-br"
" %(bridge)s")
command = templatized_command % module.params
commands.append(command)
else:
if have:
if want['fail_mode'] != have['fail_mode']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set-fail-mode %(bridge)s"
" %(fail_mode)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids'] != have['external_ids']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" br-set-external-id %(bridge)s")
command = templatized_command % module.params
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
if (k not in have['external_ids']
or want['external_ids'][k] != have['external_ids'][k]):
command += " " + k + " " + v
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-br"
" %(bridge)s")
command = templatized_command % module.params
if want['parent']:
templatized_command = "%(parent)s %(vlan)s"
command += " " + templatized_command % module.params
if want['set']:
templatized_command = " -- set %(set)s"
command += templatized_command % module.params
commands.append(command)
if want['fail_mode']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set-fail-mode %(bridge)s"
" %(fail_mode)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" br-set-external-id %(bridge)s")
command = templatized_command % module.params
command += " " + k + " " + v
commands.append(command)
return commands
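# Illustrative example of the command generation above: with have == {} and
#   want = {'bridge': 'br-int', 'parent': 'br-parent', 'vlan': 405,
#           'fail_mode': None, 'external_ids': None, 'set': None}
# a single command is produced, roughly:
#   ovs-vsctl -t 5 add-br br-int br-parent 405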
def map_config_to_obj(module):
templatized_command = "%(ovs-vsctl)s -t %(timeout)s list-br"
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
if rc != 0:
module.fail_json(msg=err)
obj = {}
if module.params['bridge'] in out.splitlines():
obj['bridge'] = module.params['bridge']
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-to-parent"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['parent'] = out.strip()
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-to-vlan"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['vlan'] = out.strip()
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get-fail-mode"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['fail_mode'] = _fail_mode_to_str(out)
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s br-get-external-id"
" %(bridge)s")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['external_ids'] = _external_ids_to_dict(out)
return obj
def map_params_to_obj(module):
obj = {
'bridge': module.params['bridge'],
'parent': module.params['parent'],
'vlan': module.params['vlan'],
'fail_mode': module.params['fail_mode'],
'external_ids': module.params['external_ids'],
'set': module.params['set']
}
return obj
def main():
""" Entry point. """
argument_spec = {
'bridge': {'required': True},
'parent': {'default': None},
'vlan': {'default': None, 'type': 'int'},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'external_ids': {'default': None, 'type': 'dict'},
'fail_mode': {'default': None},
'set': {'required': False, 'default': None}
}
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    # required_if cannot express "parent is set to any value" (the original
    # `('parent', not None, ('vlan',))` compared against True and never
    # fired), so enforce the parent/vlan dependency explicitly.
    if module.params['parent'] is not None and module.params['vlan'] is None:
        module.fail_json(msg='vlan is required when parent is given')
result = {'changed': False}
# We add ovs-vsctl to module_params to later build up templatized commands
module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
for c in commands:
module.run_command(c, check_rc=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
Dezmonius/Deslium
|
refs/heads/master
|
Deslium Software/Deslium Hestia/CreatorKivyProject-master/libs/plugins/button/__init__.py
|
3
|
# -*- coding: utf-8 -*-
#
# A simple example plugin. Adds a button to the actionbar.
#
from kivy.app import App
from kivy.clock import Clock
def test_plugin(interval):
    # `app` was a bare global, likely injected by the plugin loader
    app = App.get_running_app()
    app.screen.ids.action_bar.right_action_items = [
['shopping-cart', lambda x: None], ['more-vert', lambda x: None]
]
Clock.schedule_once(test_plugin, 5)
|
uranix/ttpy
|
refs/heads/python3
|
examples/test_amen.py
|
2
|
import sys
sys.path.append('../')
import tt
from tt.amen import amen_solve
""" This program test two subroutines: matrix-by-vector multiplication
and linear system solution via AMR scheme"""
d = 12
A = tt.qlaplace_dd([d])
x = tt.ones(2,d)
y = amen_solve(A,x,x,1e-6)
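# Hedged sanity check (assumes ttpy's tt.matvec and TT-vector .norm(), as
# used in the library's own examples):
#   res = (tt.matvec(A, y) - x).norm() / x.norm()
#   print(res)  # should be on the order of the 1e-6 tolerance above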
|
teemulehtinen/a-plus
|
refs/heads/master
|
lib/api/authentication/__init__.py
|
3
|
from lib.crypto import get_signed_message
GRADER_AUTH_TOKEN = 'token'
def get_graderauth_submission_params(submission):
token = "s{:x}.{}".format(submission.id, submission.hash)
return [(GRADER_AUTH_TOKEN, token)]
def get_graderauth_exercise_params(exercise, user=None):
user_id = str(user.id) if user else ''
identifier = "{:s}.{:d}".format(user_id, exercise.id)
message = get_signed_message(identifier).decode('ascii')
token = "e{:s}".format(message)
return [(GRADER_AUTH_TOKEN, token)]
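# Hedged usage sketch: the returned (key, token) pairs are meant to be
# appended to a grader URL as query parameters; `grader_url` below is a
# hypothetical base URL:
#   from urllib.parse import urlencode
#   url = "{}?{}".format(grader_url,
#                        urlencode(get_graderauth_submission_params(submission)))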
|
devs1991/test_edx_docmode
|
refs/heads/master
|
lms/djangoapps/class_dashboard/views.py
|
57
|
"""
Handles requests for data, returning a json
"""
import logging
import json
from django.http import HttpResponse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_overview_with_access
from courseware.access import has_access
from class_dashboard import dashboard_data
log = logging.getLogger(__name__)
def has_instructor_access_for_class(user, course_id):
"""
Returns true if the `user` is an instructor for the course.
"""
course = get_course_overview_with_access(user, 'staff', course_id)
return bool(has_access(user, 'staff', course))
def all_sequential_open_distrib(request, course_id):
"""
Creates a json with the open distribution for all the subsections in the course.
`request` django request
    `course_id` the ID of the course of interest
Returns the format in dashboard_data.get_d3_sequential_open_distrib
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_sequential_open_distrib(course_key)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), content_type="application/json")
def all_problem_grade_distribution(request, course_id):
"""
Creates a json with the grade distribution for all the problems in the course.
    `request` django request
    `course_id` the ID of the course of interest
Returns the format in dashboard_data.get_d3_problem_grade_distrib
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_problem_grade_distrib(course_key)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), content_type="application/json")
def section_problem_grade_distrib(request, course_id, section):
"""
Creates a json with the grade distribution for the problems in the specified section.
`request` django request
    `course_id` the ID of the course of interest
`section` The zero-based index of the section for the course
Returns the format in dashboard_data.get_d3_section_grade_distrib
If this is requested multiple times quickly for the same course, it is better to call all_problem_grade_distribution
and pick out the sections of interest.
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_section_grade_distrib(course_key, section)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), content_type="application/json")
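# Hedged wiring sketch (illustrative names, not the project's actual
# URLconf): each view above expects a course_id captured from the URL, e.g.
#   url(r'^(?P<course_id>[^/]+)/all_problem_grade_distribution$',
#       all_problem_grade_distribution),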
|
pschmitt/home-assistant
|
refs/heads/dev
|
tests/components/awair/const.py
|
21
|
"""Constants used in Awair tests."""
import json
from homeassistant.const import CONF_ACCESS_TOKEN
from tests.common import load_fixture
AWAIR_UUID = "awair_24947"
CONFIG = {CONF_ACCESS_TOKEN: "12345"}
UNIQUE_ID = "foo@bar.com"
DEVICES_FIXTURE = json.loads(load_fixture("awair/devices.json"))
GEN1_DATA_FIXTURE = json.loads(load_fixture("awair/awair.json"))
GEN2_DATA_FIXTURE = json.loads(load_fixture("awair/awair-r2.json"))
GLOW_DATA_FIXTURE = json.loads(load_fixture("awair/glow.json"))
MINT_DATA_FIXTURE = json.loads(load_fixture("awair/mint.json"))
NO_DEVICES_FIXTURE = json.loads(load_fixture("awair/no_devices.json"))
OFFLINE_FIXTURE = json.loads(load_fixture("awair/awair-offline.json"))
OMNI_DATA_FIXTURE = json.loads(load_fixture("awair/omni.json"))
USER_FIXTURE = json.loads(load_fixture("awair/user.json"))
|
jfinkels/networkx
|
refs/heads/master
|
networkx/algorithms/tests/test_smetric.py
|
95
|
from nose.tools import assert_equal, raises
import networkx as nx
def test_smetric():
g = nx.Graph()
g.add_edge(1,2)
g.add_edge(2,3)
g.add_edge(2,4)
g.add_edge(1,4)
sm = nx.s_metric(g,normalized=False)
assert_equal(sm, 19.0)
# smNorm = nx.s_metric(g,normalized=True)
# assert_equal(smNorm, 0.95)
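    # s(G) sums deg(u) * deg(v) over all edges; here deg(1)=2, deg(2)=3,
    # deg(3)=1 and deg(4)=2, so s = 2*3 + 3*1 + 3*2 + 2*2 = 19.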
@raises(nx.NetworkXError)
def test_normalized():
sm = nx.s_metric(nx.Graph(),normalized=True)
|
whitzhu/kolibri
|
refs/heads/master
|
kolibri/auth/test/test_api.py
|
2
|
from __future__ import absolute_import, print_function, unicode_literals
import collections
import factory
import sys
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase as BaseTestCase
from django.contrib.sessions.models import Session
from .. import models
DUMMY_PASSWORD = "password"
# A weird hack because of http://bugs.python.org/issue17866
if sys.version_info >= (3,):
class APITestCase(BaseTestCase):
def assertItemsEqual(self, *args, **kwargs):
self.assertCountEqual(*args, **kwargs)
else:
class APITestCase(BaseTestCase):
pass
class FacilityFactory(factory.DjangoModelFactory):
class Meta:
model = models.Facility
name = factory.Sequence(lambda n: "Rock N' Roll High School #%d" % n)
class ClassroomFactory(factory.DjangoModelFactory):
class Meta:
model = models.Classroom
name = factory.Sequence(lambda n: "Basic Rock Theory #%d" % n)
class LearnerGroupFactory(factory.DjangoModelFactory):
class Meta:
model = models.LearnerGroup
name = factory.Sequence(lambda n: "Group #%d" % n)
class FacilityUserFactory(factory.DjangoModelFactory):
class Meta:
model = models.FacilityUser
facility = factory.SubFactory(FacilityFactory)
username = factory.Sequence(lambda n: 'user%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class DeviceOwnerFactory(factory.DjangoModelFactory):
class Meta:
model = models.DeviceOwner
username = factory.Sequence(lambda n: 'deviceowner%d' % n)
password = factory.PostGenerationMethodCall('set_password', DUMMY_PASSWORD)
class LearnerGroupAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(3)]
self.learner_groups = []
for classroom in self.classrooms:
self.learner_groups += [LearnerGroupFactory.create(parent=classroom) for _ in range(5)]
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_learnergroup_list(self):
response = self.client.get(reverse('learnergroup-list'), format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
)) for group in self.learner_groups]
self.assertItemsEqual(response.data, expected)
def test_learnergroup_detail(self):
response = self.client.get(reverse('learnergroup-detail', kwargs={'pk': self.learner_groups[0].id}), format='json')
expected = {
'id': self.learner_groups[0].id,
'name': self.learner_groups[0].name,
'parent': self.learner_groups[0].parent.id,
}
self.assertDictEqual(response.data, expected)
def test_parent_in_queryparam_with_one_id(self):
classroom_id = self.classrooms[0].id
response = self.client.get(reverse('learnergroup-list'), {'parent': classroom_id},
format='json')
expected = [collections.OrderedDict((
('id', group.id),
('name', group.name),
('parent', group.parent.id),
)) for group in self.learner_groups if group.parent.id == classroom_id]
self.assertItemsEqual(response.data, expected)
class ClassroomAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.classrooms = [ClassroomFactory.create(parent=self.facility) for _ in range(10)]
self.learner_group = LearnerGroupFactory.create(parent=self.classrooms[0])
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_classroom_list(self):
response = self.client.get(reverse('classroom-list'), format='json')
expected = [collections.OrderedDict((
('id', classroom.id),
('name', classroom.name),
('parent', classroom.parent.id),
)) for classroom in self.classrooms]
self.assertItemsEqual(response.data, expected)
def test_classroom_detail(self):
response = self.client.get(reverse('classroom-detail', kwargs={'pk': self.classrooms[0].id}), format='json')
expected = {
'id': self.classrooms[0].id,
'name': self.classrooms[0].name,
'parent': self.classrooms[0].parent.id,
}
self.assertDictEqual(response.data, expected)
class FacilityAPITestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility1 = FacilityFactory.create()
self.facility2 = FacilityFactory.create()
self.user1 = FacilityUserFactory.create(facility=self.facility1)
self.user2 = FacilityUserFactory.create(facility=self.facility2)
def test_sanity(self):
self.assertTrue(self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1))
def test_facility_user_can_get_detail(self):
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
response = self.client.get(reverse('facility-detail', kwargs={'pk': self.facility1.pk}),
format='json')
# .assertDictContainsSubset checks that the first argument is a subset of the second argument
self.assertDictContainsSubset({
'name': self.facility1.name,
}, dict(response.data))
def test_anonymous_user_gets_empty_list(self):
response = self.client.get(reverse('facility-list'), format='json')
self.assertEqual(response.data, [])
def test_device_admin_can_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 1)
def test_facility_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.client.login(username=self.user1.username, password=DUMMY_PASSWORD, facility=self.facility1)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_anonymous_user_cannot_create_facility(self):
new_facility_name = "New Facility"
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
response = self.client.post(reverse('facility-list'), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(models.Facility.objects.filter(name=new_facility_name).count(), 0)
def test_device_admin_can_update_facility(self):
old_facility_name = self.facility1.name
new_facility_name = "Renamed Facility"
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, old_facility_name)
response = self.client.put(reverse('facility-detail', kwargs={"pk": self.facility1.id}), {"name": new_facility_name}, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(models.Facility.objects.get(id=self.facility1.id).name, new_facility_name)
def test_device_admin_can_delete_facility(self):
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 1)
response = self.client.delete(reverse('facility-detail', kwargs={"pk": self.facility1.id}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(models.Facility.objects.filter(id=self.facility1.id).count(), 0)
class UserCreationTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.client.login(username=self.device_owner.username, password=DUMMY_PASSWORD)
def test_creating_device_owner_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
response = self.client.post(reverse('deviceowner-list'), {"username": new_username, "password": new_password}, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.DeviceOwner.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.DeviceOwner.objects.get(username=new_username).check_password(bad_password))
def test_creating_facility_user_via_api_sets_password_correctly(self):
new_username = "goliath"
new_password = "davidsucks"
bad_password = "ilovedavid"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.FacilityUser.objects.get(username=new_username).check_password(new_password))
self.assertFalse(models.FacilityUser.objects.get(username=new_username).check_password(bad_password))
def test_creating_same_user_throws_409_error(self):
new_username = "goliath"
new_password = "davidsucks"
data = {"username": new_username, "password": new_password, "facility": self.facility.id}
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.post(reverse('facilityuser-list'), data, format="json")
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
class LoginLogoutTestCase(APITestCase):
def setUp(self):
self.device_owner = DeviceOwnerFactory.create()
self.facility = FacilityFactory.create()
self.user = FacilityUserFactory.create(facility=self.facility)
self.admin = FacilityUserFactory.create(facility=self.facility, password="bar")
self.facility.add_admin(self.admin)
self.cr = ClassroomFactory.create(parent=self.facility)
self.cr.add_coach(self.admin)
def test_login_and_logout_device_owner(self):
self.client.post(reverse('session-list'), data={"username": self.device_owner.username, "password": DUMMY_PASSWORD})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_login_and_logout_facility_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": DUMMY_PASSWORD, "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 1)
self.client.delete(reverse('session-detail', kwargs={'pk': 'current'}))
self.assertEqual(len(Session.objects.all()), 0)
def test_incorrect_credentials_does_not_log_in_user(self):
self.client.post(reverse('session-list'), data={"username": self.user.username, "password": "foo", "facility": self.facility.id})
sessions = Session.objects.all()
self.assertEqual(len(sessions), 0)
def test_session_return_admin_and_coach_kind(self):
self.client.post(reverse('session-list'), data={"username": self.admin.username, "password": "bar", "facility": self.facility.id})
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
        self.assertEqual(response.data['kind'][0], 'ADMIN')
        self.assertEqual(response.data['kind'][1], 'COACH')
def test_session_return_anon_kind(self):
response = self.client.get(reverse('session-detail', kwargs={'pk': 'current'}))
        self.assertEqual(response.data['kind'][0], 'ANONYMOUS')
|
eq0rip/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/api/texture.py
|
125
|
from bpy import data, types
from .. import constants, logger
from .constants import IMAGE, MAG_FILTER, MIN_FILTER, MAPPING
from . import image
def _texture(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Texture):
texture = name
else:
texture = data.textures[name]
return func(texture, *args, **kwargs)
return inner
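# Hedged note: via the decorator above, each accessor accepts either a
# texture datablock or its name (assuming Blender's bpy API), e.g.
#   anisotropy('MyTexture')
#   anisotropy(data.textures['MyTexture'])
# resolve to the same texture before the wrapped function runs.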
@_texture
def anisotropy(texture):
"""
:param texture:
:return: filter_size value
"""
logger.debug("texture.file_path(%s)", texture)
return texture.filter_size
@_texture
def file_name(texture):
"""
:param texture:
:return: file name
"""
logger.debug("texture.file_name(%s)", texture)
if texture.image:
return image.file_name(texture.image)
@_texture
def file_path(texture):
"""
:param texture:
:return: file path
"""
logger.debug("texture.file_path(%s)", texture)
if texture.image:
return image.file_path(texture.image)
@_texture
def image_node(texture):
"""
:param texture:
:return: texture's image node
"""
logger.debug("texture.image_node(%s)", texture)
return texture.image
@_texture
def mag_filter(texture):
"""
:param texture:
:return: THREE_mag_filter value
"""
logger.debug("texture.mag_filter(%s)", texture)
try:
val = texture.THREE_mag_filter
except AttributeError:
logger.debug("No THREE_mag_filter attribute found")
val = MAG_FILTER
return val
@_texture
def mapping(texture):
"""
:param texture:
:return: THREE_mapping value
"""
logger.debug("texture.mapping(%s)", texture)
try:
val = texture.THREE_mapping
except AttributeError:
logger.debug("No THREE_mapping attribute found")
val = MAPPING
return val
@_texture
def min_filter(texture):
"""
:param texture:
:return: THREE_min_filter value
"""
logger.debug("texture.min_filter(%s)", texture)
try:
val = texture.THREE_min_filter
except AttributeError:
logger.debug("No THREE_min_filter attribute found")
val = MIN_FILTER
return val
@_texture
def repeat(texture):
"""The repeat parameters of the texture node
:param texture:
:returns: repeat_x, and repeat_y values
"""
logger.debug("texture.repeat(%s)", texture)
return (texture.repeat_x, texture.repeat_y)
@_texture
def wrap(texture):
"""The wrapping parameters of the texture node
:param texture:
:returns: tuple of THREE compatible wrapping values
"""
logger.debug("texture.wrap(%s)", texture)
wrapping = {
True: constants.WRAPPING.MIRROR,
False: constants.WRAPPING.REPEAT
}
return (wrapping[texture.use_mirror_x],
wrapping[texture.use_mirror_y])
def textures():
"""
:return: list of texture node names that are IMAGE
"""
logger.debug("texture.textures()")
for mat in data.materials:
if mat.users == 0:
continue
for slot in mat.texture_slots:
if slot and slot.use and slot.texture.type == IMAGE:
yield slot.texture.name
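# Hedged usage sketch: iterate over all image textures on used materials
# and print their file names:
#   for name in textures():
#       print(file_name(name))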
|
BartoszCichecki/onlinepython
|
refs/heads/master
|
onlinepython/pypy-2.4.0-win32/lib-python/2.7/encodings/quopri_codec.py
|
419
|
"""Codec for quoted-printable encoding.
Like base64 and rot13, this returns Python strings, not Unicode.
"""
import codecs, quopri
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def quopri_encode(input, errors='strict'):
"""Encode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
    # using str() because of cStringIO's undesired Unicode behavior.
f = StringIO(str(input))
g = StringIO()
quopri.encode(f, g, 1)
output = g.getvalue()
return (output, len(input))
def quopri_decode(input, errors='strict'):
"""Decode the input, returning a tuple (output object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
f = StringIO(str(input))
g = StringIO()
quopri.decode(f, g)
output = g.getvalue()
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return quopri_encode(input,errors)
def decode(self, input,errors='strict'):
return quopri_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return quopri_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return quopri_decode(input, self.errors)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
# encodings module API
def getregentry():
return codecs.CodecInfo(
name='quopri',
encode=quopri_encode,
decode=quopri_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
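# Illustrative round trip (Python 2 semantics, matching this codec):
#   'abc\xff'.encode('quopri')  # -> 'abc=FF'
#   'abc=FF'.decode('quopri')   # -> 'abc\xff'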
|
svinota/cxnet
|
refs/heads/master
|
cxnet/netlink/generic.py
|
1
|
# -*- coding: utf-8 -*-
"""
cxnet.netlink.generic
~~~~~~~~~~~~~~~~~~~~~
This module implements generic Netlink socket object.
:copyright: (c) 2011 by Peter V. Saveliev, see AUTHORS for more details.
:license: GPL, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from ctypes import Structure
from ctypes import sizeof, create_string_buffer, addressof
from ctypes import c_byte, c_ubyte, c_uint16
from .core import nlmsghdr, nl_socket, nlattr, attr_msg, NLM_F_REQUEST, \
NLMSG_MAX_LEN, NLMSG_MIN_TYPE, NLMSG_ALIGN
from .exceptions import NetlinkError, NetlinkNoSuchProtocol
class genlmsghdr(Structure):
_fields_ = [
("cmd", c_ubyte),
("version", c_ubyte),
("reserved", c_uint16),
]
class genlmsg(Structure, attr_msg):
_fields_ = [
("hdr", nlmsghdr),
("genlmsghdr", genlmsghdr),
("data", c_byte * (NLMSG_MAX_LEN - sizeof(nlmsghdr) - sizeof(genlmsghdr))),
]
GENL_NAMSIZ = 16 # length of family name
GENL_MIN_ID = NLMSG_MIN_TYPE
GENL_MAX_ID = 1023
GENL_HDRLEN = NLMSG_ALIGN(sizeof(genlmsghdr))
GENL_ADMIN_PERM = 0x01
GENL_CMD_CAP_DO = 0x02
GENL_CMD_CAP_DUMP = 0x04
GENL_CMD_CAP_HASPOL = 0x08
#
# List of reserved static generic netlink identifiers:
#
GENL_ID_GENERATE = 0
GENL_ID_CTRL = NLMSG_MIN_TYPE
#
# Controller
#
CTRL_CMD_UNSPEC = 0x0
CTRL_CMD_NEWFAMILY = 0x1
CTRL_CMD_DELFAMILY = 0x2
CTRL_CMD_GETFAMILY = 0x3
CTRL_CMD_NEWOPS = 0x4
CTRL_CMD_DELOPS = 0x5
CTRL_CMD_GETOPS = 0x6
CTRL_CMD_NEWMCAST_GRP = 0x7
CTRL_CMD_DELMCAST_GRP = 0x8
CTRL_CMD_GETMCAST_GRP = 0x9 # unused
CTRL_ATTR_UNSPEC = 0x0
CTRL_ATTR_FAMILY_ID = 0x1
CTRL_ATTR_FAMILY_NAME = 0x2
CTRL_ATTR_VERSION = 0x3
CTRL_ATTR_HDRSIZE = 0x4
CTRL_ATTR_MAXATTR = 0x5
CTRL_ATTR_OPS = 0x6
CTRL_ATTR_MCAST_GROUPS = 0x7
CTRL_ATTR_OP_UNSPEC = 0x0
CTRL_ATTR_OP_ID = 0x1
CTRL_ATTR_OP_FLAGS = 0x2
CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
CTRL_ATTR_MCAST_GRP_NAME = 0x1
CTRL_ATTR_MCAST_GRP_ID = 0x2
class genl_socket(nl_socket):
msg = genlmsg
def get_protocol_id(self, name):
if sys.version_info >= (3, 0):
buf = create_string_buffer(name.encode("utf-8"))
else:
buf = create_string_buffer(name)
self.send_cmd(GENL_ID_CTRL, CTRL_CMD_GETFAMILY,
CTRL_ATTR_FAMILY_NAME, buf)
try:
l, msg = self.recv()
        except NetlinkError as e:
if e.code == -2:
raise NetlinkNoSuchProtocol(e.hdr)
else:
raise e
name = nlattr.from_address(addressof(msg.data))
prid = nlattr.from_address(addressof(msg.data) +
NLMSG_ALIGN(name.nla_len))
assert prid.nla_type == CTRL_ATTR_FAMILY_ID
return c_uint16.from_address(addressof(prid) + sizeof(prid)).value
def send_cmd(self, prid, cmd, nla_type, nla_data, seq=0):
msg = genlmsg()
msg.hdr.type = prid
msg.hdr.flags = NLM_F_REQUEST
msg.hdr.sequence_number = seq
msg.genlmsghdr.cmd = cmd
msg.genlmsghdr.version = 0x1
msg.set_attr(nla_type, nla_data)
return self.send(msg, msg.size())
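# Hedged usage sketch (Linux only; the family name is an example):
#   s = genl_socket()
#   family_id = s.get_protocol_id("TASKSTATS")
#   # family_id then serves as the nlmsg type for subsequent send_cmd calls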
|
Lujeni/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/setup.py
|
17
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
gather_subset:
version_added: "2.1"
description:
- "If supplied, restrict the additional facts collected to the given subset.
Possible values: C(all), C(min), C(hardware), C(network), C(virtual), C(ohai), and
C(facter). Can specify a list of values to specify a larger subset.
Values can also be used with an initial C(!) to specify that
that specific subset should not be collected. For instance:
C(!hardware,!network,!virtual,!ohai,!facter). If C(!all) is specified
then only the min subset is collected. To avoid collecting even the
min subset, specify C(!all,!min). To collect only specific facts,
use C(!all,!min), and specify the particular fact subsets.
Use the filter parameter if you do not want to display some collected
facts."
required: false
default: "all"
gather_timeout:
version_added: "2.2"
description:
- Set the default timeout in seconds for individual fact gathering.
required: false
default: 10
filter:
version_added: "1.1"
description:
- If supplied, only return facts that match this shell-style (fnmatch) wildcard.
required: false
default: "*"
fact_path:
version_added: "1.3"
description:
      - Path used for local ansible facts (C(*.fact)) - files in this dir
        will be run (if executable) and their results added to C(ansible_local) facts;
        if a file is not executable it is read instead. Check notes for Windows options. (from 2.1 on)
File/results format can be JSON or INI-format. The default C(fact_path) can be
specified in C(ansible.cfg) for when setup is automatically called as part of
C(gather_facts).
required: false
default: /etc/ansible/facts.d
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
- This module is also supported for Windows targets.
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
remote systems. (See also M(facter) and M(ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
C(filter) as this is provided by a simpler implementation of the module.
- If the target host is Windows you can now use C(fact_path). Make sure that this path
exists on the target host. Files in this path MUST be PowerShell scripts (``*.ps1``) and
their output must be formattable in JSON (Ansible will take care of this). Test the
output of your scripts.
This option was added in Ansible 2.1.
- This module is also supported for Windows targets.
- This module should be run with elevated privileges on BSD systems to gather facts like ansible_product_version.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
# ansible all -m setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
# ansible all -m setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
# ansible all -m setup -a 'filter=facter_*'
# Collect only facts returned by facter.
# ansible all -m setup -a 'gather_subset=!all,!any,facter'
- name: Collect only facts returned by facter
setup:
gather_subset:
- '!all'
- '!any'
- facter
# Display only facts about certain interfaces.
# ansible all -m setup -a 'filter=ansible_eth[0-2]'
# Restrict additional gathered facts to network and virtual (includes default minimum facts)
# ansible all -m setup -a 'gather_subset=network,virtual'
# Collect only network and virtual (excludes default minimum facts)
# ansible all -m setup -a 'gather_subset=!all,!any,network,virtual'
# Do not call puppet facter or ohai even if present.
# ansible all -m setup -a 'gather_subset=!facter,!ohai'
# Only collect the default minimum amount of facts:
# ansible all -m setup -a 'gather_subset=!all'
# Collect no facts, even the default minimum subset of facts:
# ansible all -m setup -a 'gather_subset=!all,!min'
# Display facts from Windows hosts with custom facts stored in C(C:\\custom_facts).
# ansible windows -m setup -a "fact_path='c:\\custom_facts'"
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import ansible_collector
from ansible.module_utils.facts import default_collectors
def main():
module = AnsibleModule(
argument_spec=dict(
gather_subset=dict(default=["all"], required=False, type='list'),
gather_timeout=dict(default=10, required=False, type='int'),
filter=dict(default="*", required=False),
fact_path=dict(default='/etc/ansible/facts.d', required=False, type='path'),
),
supports_check_mode=True,
)
gather_subset = module.params['gather_subset']
gather_timeout = module.params['gather_timeout']
filter_spec = module.params['filter']
# TODO: this mimics existing behavior where gather_subset=["!all"] actually means
# to collect nothing except for the below list
# TODO: decide what '!all' means, I lean towards making it mean none, but likely needs
# some tweaking on how gather_subset operations are performed
minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
'distribution', 'dns', 'env', 'fips', 'local',
'lsb', 'pkg_mgr', 'platform', 'python', 'selinux',
'service_mgr', 'ssh_pub_keys', 'user'])
all_collector_classes = default_collectors.collectors
# rename namespace_name to root_key?
namespace = PrefixFactNamespace(namespace_name='ansible',
prefix='ansible_')
fact_collector = \
ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
namespace=namespace,
filter_spec=filter_spec,
gather_subset=gather_subset,
gather_timeout=gather_timeout,
minimal_gather_subset=minimal_gather_subset)
facts_dict = fact_collector.collect(module=module)
module.exit_json(ansible_facts=facts_dict)
if __name__ == '__main__':
main()
|
raschuetz/foundations-homework
|
refs/heads/master
|
07/data-analysis/lib/python3.5/site-packages/pip/req/__init__.py
|
806
|
from __future__ import absolute_import
from .req_install import InstallRequirement
from .req_set import RequirementSet, Requirements
from .req_file import parse_requirements
__all__ = [
"RequirementSet", "Requirements", "InstallRequirement",
"parse_requirements",
]
|
mcking49/apache-flask
|
refs/heads/master
|
Python/Lib/site-packages/pip/_vendor/html5lib/treebuilders/_base.py
|
915
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
        value - The value of the current node (applies to text nodes and
            comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
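        # "Noah's Ark" clause of the HTML5 spec: between the end of the
        # list and the last marker, at most three entries with the same
        # name and attributes may exist; the earliest duplicate is dropped.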
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
# If we pass a node in we match that. if we pass a string
# match any node with that name
exactNode = hasattr(target, "nameTuple")
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
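# ---------------------------------------------------------------------------
# Hedged illustration (not part of html5lib): a minimal stub exercising the
# "Noah's Ark" clause of ActiveFormattingElements.append() above.  Once three
# entries with equal nameTuple/attributes follow the last Marker, appending a
# fourth evicts the oldest.  _StubNode is hypothetical; nodesEqual() only
# compares nameTuple and attributes, and the module-level Marker sentinel is
# assumed to be defined earlier in this module.
class _StubNode(object):
    def __init__(self, name, attributes):
        self.nameTuple = ("http://www.w3.org/1999/xhtml", name)
        self.attributes = attributes

def _demo_noahs_ark():
    afe = ActiveFormattingElements()
    for _ in range(4):
        afe.append(_StubNode("b", {}))
    assert len(afe) == 3  # the fourth equal entry evicted the oldest one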
|
marckuz/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_erroneous/6_auto.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("migrations", "5_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
golismero/golismero-devel
|
refs/heads/master
|
thirdparty_libs/django/views/decorators/http.py
|
228
|
"""
Decorators for views based on HTTP headers.
"""
import logging
from calendar import timegm
from functools import wraps
from django.utils.decorators import decorator_from_middleware, available_attrs
from django.utils.http import http_date, parse_http_date_safe, parse_etags, quote_etag
from django.middleware.http import ConditionalGetMiddleware
from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
logger = logging.getLogger('django.request')
def require_http_methods(request_method_list):
"""
Decorator to make a view only accept particular request methods. Usage::
@require_http_methods(["GET", "POST"])
def my_view(request):
# I can assume now that only GET or POST requests make it this far
# ...
Note that request methods should be in uppercase.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return HttpResponseNotAllowed(request_method_list)
return func(request, *args, **kwargs)
return inner
return decorator
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accept the GET method."
require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accept the POST method."
require_safe = require_http_methods(["GET", "HEAD"])
require_safe.__doc__ = "Decorator to require that a view only accept safe methods: GET and HEAD."
def condition(etag_func=None, last_modified_func=None):
"""
Decorator to support conditional retrieval (or change) for a view
function.
The parameters are callables to compute the ETag and last modified time for
the requested resource, respectively. The callables are passed the same
parameters as the view itself. The Etag function should return a string (or
None if the resource doesn't exist), whilst the last_modified function
should return a datetime object (or None if the resource doesn't exist).
If both parameters are provided, all the preconditions must be met before
the view is processed.
This decorator will either pass control to the wrapped view function or
return an HTTP 304 response (unmodified) or 412 response (preconditions
failed), depending upon the request method.
Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
plus If-modified-since headers) will result in the view function being
called.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
# Get HTTP request headers
if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
if_match = request.META.get("HTTP_IF_MATCH")
if if_none_match or if_match:
# There can be more than one ETag in the request, so we
# consider the list of values.
try:
etags = parse_etags(if_none_match or if_match)
except ValueError:
# In case of invalid etag ignore all ETag headers.
# Apparently Opera sends invalidly quoted headers at times
# (we should be returning a 400 response, but that's a
# little extreme) -- this is Django bug #10681.
if_none_match = None
if_match = None
# Compute values (if any) for the requested resource.
if etag_func:
res_etag = etag_func(request, *args, **kwargs)
else:
res_etag = None
if last_modified_func:
dt = last_modified_func(request, *args, **kwargs)
if dt:
res_last_modified = timegm(dt.utctimetuple())
else:
res_last_modified = None
else:
res_last_modified = None
response = None
if not ((if_match and (if_modified_since or if_none_match)) or
(if_match and if_none_match)):
# We only get here if no undefined combinations of headers are
# specified.
if ((if_none_match and (res_etag in etags or
"*" in etags and res_etag)) and
(not if_modified_since or
(res_last_modified and if_modified_since and
res_last_modified <= if_modified_since))):
if request.method in ("GET", "HEAD"):
response = HttpResponseNotModified()
else:
logger.warning('Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif if_match and ((not res_etag and "*" in etags) or
(res_etag and res_etag not in etags)):
logger.warning('Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif (not if_none_match and request.method == "GET" and
res_last_modified and if_modified_since and
res_last_modified <= if_modified_since):
response = HttpResponseNotModified()
if response is None:
response = func(request, *args, **kwargs)
# Set relevant headers on the response if they don't already exist.
if res_last_modified and not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date(res_last_modified)
if res_etag and not response.has_header('ETag'):
response['ETag'] = quote_etag(res_etag)
return response
return inner
return decorator
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
return condition(etag_func=etag_func)
def last_modified(last_modified_func):
return condition(last_modified_func=last_modified_func)
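# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of Django): the shortcut
# decorators above stack on a plain view.  The constant ETag and timestamp
# exist purely to keep the example self-contained; real callables would
# derive them from the requested resource.
import datetime

@etag(lambda request: '"demo-v1"')
@last_modified(lambda request: datetime.datetime(2013, 1, 1))
def _demo_view(request):
    return HttpResponse("ok")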
|
banebg/embedded
|
refs/heads/master
|
blog/login/models.py
|
792
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
|
nitin-cherian/Webapps
|
refs/heads/master
|
TalkPython/P4E/my_web_app/.env/lib/python3.5/site-packages/pip/__main__.py
|
834
|
from __future__ import absolute_import
import os
import sys
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
# first dirname call strips off '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
import pip # noqa
if __name__ == '__main__':
sys.exit(pip.main())
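# ---------------------------------------------------------------------------
# Hedged illustration (not part of pip): the double dirname above peels two
# path components off __file__, e.g. (hypothetical wheel path):
#
#   >>> os.path.dirname(os.path.dirname(
#   ...     '/tmp/pip-9.0.1-py2.py3-none-any.whl/pip/__main__.py'))
#   '/tmp/pip-9.0.1-py2.py3-none-any.whl'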
|
DualSpark/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/fetch.py
|
15
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import traceback
import tempfile
import base64
from ansible import constants as C
from ansible.errors import *
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' handler for fetch operations '''
if self._connection_info.check_mode:
return dict(skipped=True, msg='check mode not (yet) supported for this module')
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
if source is None or dest is None:
return dict(failed=True, msg="src and dest are required")
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source, tmp)
# calculate checksum for the remote file
remote_checksum = self._remote_checksum(tmp, source)
# use slurp if sudo and permissions are lacking
remote_data = None
if remote_checksum in ('1', '2') or self._connection_info.become:
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('rc') == 0:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
else:
# FIXME: should raise an error here? the old code did nothing
pass
# calculate the destination name
if os.path.sep not in self._connection._shell.join_path('a', ''):
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._connection_info.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
if remote_checksum in ('0', '1', '2', '3', '4'):
# these don't fail because you may want to transfer a log file that possibly MAY exist
# but keep going to fetch other log files
if remote_checksum == '0':
result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
elif remote_checksum == '1':
if fail_on_missing:
result = dict(failed=True, msg="the remote file does not exist", file=source)
else:
result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '2':
result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
elif remote_checksum == '3':
result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
elif remote_checksum == '4':
result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
return result
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
f = open(dest, 'wb')
f.write(remote_data)
f.close()
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
else:
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
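# ---------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of Ansible): the non-flat
# destination layout computed above -- <dest>/<hostname>/<remote source> with
# doubled slashes collapsed.  path_dwim() is replaced by the identity here,
# so this function is a simplification.
def _demo_fetch_dest(dest, hostname, source):
    dest = "%s/%s/%s" % (dest, hostname, source)
    return dest.replace("//", "/")

# _demo_fetch_dest('backups', 'web01', '/var/log/syslog')
# -> 'backups/web01/var/log/syslog'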
|
shail2810/nova
|
refs/heads/master
|
nova/compute/task_states.py
|
96
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible task states for instances.
Compute instance task states represent what is happening to the instance at the
current moment. These tasks can be generic, such as 'spawning', or specific,
such as 'block_device_mapping'. These task states allow for a better view into
what an instance is doing and should be displayed to users/administrators as
necessary.
"""
# possible task states during create()
SCHEDULING = 'scheduling'
BLOCK_DEVICE_MAPPING = 'block_device_mapping'
NETWORKING = 'networking'
SPAWNING = 'spawning'
# possible task states during snapshot()
IMAGE_SNAPSHOT = 'image_snapshot'
IMAGE_SNAPSHOT_PENDING = 'image_snapshot_pending'
IMAGE_PENDING_UPLOAD = 'image_pending_upload'
IMAGE_UPLOADING = 'image_uploading'
# possible task states during backup()
IMAGE_BACKUP = 'image_backup'
# possible task states during set_admin_password()
UPDATING_PASSWORD = 'updating_password'
# possible task states during resize()
RESIZE_PREP = 'resize_prep'
RESIZE_MIGRATING = 'resize_migrating'
RESIZE_MIGRATED = 'resize_migrated'
RESIZE_FINISH = 'resize_finish'
# possible task states during revert_resize()
RESIZE_REVERTING = 'resize_reverting'
# possible task states during confirm_resize()
RESIZE_CONFIRMING = 'resize_confirming'
# possible task states during reboot()
REBOOTING = 'rebooting'
REBOOT_PENDING = 'reboot_pending'
REBOOT_STARTED = 'reboot_started'
REBOOTING_HARD = 'rebooting_hard'
REBOOT_PENDING_HARD = 'reboot_pending_hard'
REBOOT_STARTED_HARD = 'reboot_started_hard'
# possible task states during pause()
PAUSING = 'pausing'
# possible task states during unpause()
UNPAUSING = 'unpausing'
# possible task states during suspend()
SUSPENDING = 'suspending'
# possible task states during resume()
RESUMING = 'resuming'
# possible task states during power_off()
POWERING_OFF = 'powering-off'
# possible task states during power_on()
POWERING_ON = 'powering-on'
# possible task states during rescue()
RESCUING = 'rescuing'
# possible task states during unrescue()
UNRESCUING = 'unrescuing'
# possible task states during rebuild()
REBUILDING = 'rebuilding'
REBUILD_BLOCK_DEVICE_MAPPING = "rebuild_block_device_mapping"
REBUILD_SPAWNING = 'rebuild_spawning'
# possible task states during live_migrate()
MIGRATING = "migrating"
# possible task states during delete()
DELETING = 'deleting'
# possible task states during soft_delete()
SOFT_DELETING = 'soft-deleting'
# possible task states during restore()
RESTORING = 'restoring'
# possible task states during shelve()
SHELVING = 'shelving'
SHELVING_IMAGE_PENDING_UPLOAD = 'shelving_image_pending_upload'
SHELVING_IMAGE_UPLOADING = 'shelving_image_uploading'
# possible task states during shelve_offload()
SHELVING_OFFLOADING = 'shelving_offloading'
# possible task states during unshelve()
UNSHELVING = 'unshelving'
|
affan2/django-activity-stream
|
refs/heads/master
|
example_project/registration/__init__.py
|
30
|
VERSION = (0, 8, 0, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s %s' % (version, VERSION[3], VERSION[4])
return version
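# Hedged illustration (not part of the module): what get_version() yields for
# the tuple above, and for a hypothetical final release:
#
#   VERSION = (0, 8, 0, 'alpha', 1)  ->  '0.8 alpha 1'
#   VERSION = (1, 2, 3, 'final', 0)  ->  '1.2.3'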
|
sidartaoliveira/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/profile_tasks.py
|
34
|
# (C) 2016, Joel, http://github.com/jjshoe
# (C) 2015, Tom Paine, <github@aioue.net>
# (C) 2014, Jharrod LaFon, @JharrodLaFon
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com>
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# File is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See <http://www.gnu.org/licenses/> for a copy of the
# GNU General Public License
# Provides per-task timing, ongoing playbook elapsed time and
# ordered list of top 20 longest running tasks at end
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import time
from ansible.module_utils.six.moves import reduce
from ansible.plugins.callback import CallbackBase
# define start time
t0 = tn = time.time()
def secondsToStr(t):
# http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds
rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:]
return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60]))
def filled(msg, fchar="*"):
if len(msg) == 0:
width = 79
else:
msg = "%s " % msg
width = 79 - len(msg)
if width < 3:
width = 3
filler = fchar * width
return "%s%s " % (msg, filler)
def timestamp(self):
if self.current is not None:
self.stats[self.current]['time'] = time.time() - self.stats[self.current]['time']
def tasktime():
global tn
time_current = time.strftime('%A %d %B %Y %H:%M:%S %z')
time_elapsed = secondsToStr(time.time() - tn)
time_total_elapsed = secondsToStr(time.time() - t0)
tn = time.time()
return filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed))
class CallbackModule(CallbackBase):
"""
This callback module provides per-task timing, ongoing playbook elapsed time
and ordered list of top 20 longest running tasks at end.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'profile_tasks'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.stats = collections.OrderedDict()
self.current = None
self.sort_order = os.getenv('PROFILE_TASKS_SORT_ORDER', True)
self.task_output_limit = os.getenv('PROFILE_TASKS_TASK_OUTPUT_LIMIT', 20)
if self.sort_order == 'ascending':
self.sort_order = False
if self.task_output_limit == 'all':
self.task_output_limit = None
else:
self.task_output_limit = int(self.task_output_limit)
super(CallbackModule, self).__init__()
def _record_task(self, task):
"""
Logs the start of each task
"""
self._display.display(tasktime())
timestamp(self)
# Record the start time of the current task
self.current = task._uuid
self.stats[self.current] = {'time': time.time(), 'name': task.get_name()}
if self._display.verbosity >= 2:
self.stats[self.current]['path'] = task.get_path()
def v2_playbook_on_task_start(self, task, is_conditional):
self._record_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._record_task(task)
def playbook_on_setup(self):
self._display.display(tasktime())
def playbook_on_stats(self, stats):
self._display.display(tasktime())
self._display.display(filled("", fchar="="))
timestamp(self)
results = self.stats.items()
# Sort the tasks by the specified sort
if self.sort_order != 'none':
results = sorted(
self.stats.items(),
key=lambda x: x[1]['time'],
reverse=self.sort_order,
)
# Display the number of tasks specified or the default of 20
results = results[:self.task_output_limit]
# Print the timings
for uuid, result in results:
msg=u"{0:-<70}{1:->9}".format(result['name'] + u' ',u' {0:.02f}s'.format(result['time']))
if 'path' in result:
msg += u"\n{0:-<79}".format(result['path'] + u' ')
self._display.display(msg)
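# ---------------------------------------------------------------------------
# Hedged illustration (not part of the plugin): secondsToStr() folds divmod
# over milliseconds, seconds and minutes, e.g.:
#
#   secondsToStr(3661.5)  ->  '1:01:01.500'   (1 h, 1 min, 1.5 s)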
|
iafan/zing
|
refs/heads/master
|
pytest_pootle/fixtures/models/translation_project.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
def pytest_generate_tests(metafunc):
from pootle_project.models import PROJECT_CHECKERS
if 'checkers' in metafunc.funcargnames:
metafunc.parametrize("checkers", PROJECT_CHECKERS.keys())
def _require_tp(language, project):
"""Helper to get/create a new translation project."""
from pootle_translationproject.models import create_translation_project
return create_translation_project(language, project)
@pytest.fixture
def afrikaans_tutorial(afrikaans, tutorial):
"""Require Afrikaans Tutorial."""
return _require_tp(afrikaans, tutorial)
@pytest.fixture
def en_tutorial_obsolete(english_tutorial):
"""Require Arabic Tutorial in obsolete state."""
english_tutorial.directory.makeobsolete()
return english_tutorial
@pytest.fixture
def english_tutorial(english, tutorial):
"""Require English Tutorial."""
return _require_tp(english, tutorial)
@pytest.fixture
def italian_tutorial(italian, tutorial):
"""Require Italian Tutorial."""
return _require_tp(italian, tutorial)
@pytest.fixture
def tp_checker_tests(request, english, checkers):
from pytest_pootle.factories import ProjectDBFactory
checker_name = checkers
project = ProjectDBFactory(
checkstyle=checker_name,
source_language=english)
return (checker_name, project)
@pytest.fixture
def tp0(language0, project0):
"""Require English Project0."""
from pootle_translationproject.models import TranslationProject
return TranslationProject.objects.get(
language=language0,
project=project0)
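# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): a hypothetical test consuming the
# tp0 fixture above; pytest injects fixtures by argument name.
#
# def test_tp0_language(tp0, language0):
#     assert tp0.language == language0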
|
PetePriority/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/ruter.py
|
5
|
"""
A sensor platform that gives you information about the next departures from Ruter.
For more details about this platform, please refer to the documentation at
https://www.home-assistant.io/components/sensor.ruter/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.aiohttp_client import async_get_clientsession
REQUIREMENTS = ['pyruter==1.1.0']
_LOGGER = logging.getLogger(__name__)
CONF_STOP_ID = 'stop_id'
CONF_DESTINATION = 'destination'
CONF_OFFSET = 'offset'
DEFAULT_NAME = 'Ruter'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_STOP_ID): cv.positive_int,
vol.Optional(CONF_DESTINATION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OFFSET, default=0): cv.positive_int,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Create the sensor."""
from pyruter.api import Departures
_LOGGER.warning("The API used in this sensor is shutting down soon, "
"you should consider starting to use the "
"'entur_public_transport' sensor instead")
stop_id = config[CONF_STOP_ID]
destination = config.get(CONF_DESTINATION)
name = config[CONF_NAME]
offset = config[CONF_OFFSET]
session = async_get_clientsession(hass)
ruter = Departures(hass.loop, stop_id, destination, session)
sensor = [RuterSensor(ruter, name, offset)]
async_add_entities(sensor, True)
class RuterSensor(Entity):
"""Representation of a Ruter sensor."""
def __init__(self, ruter, name, offset):
"""Initialize the sensor."""
self.ruter = ruter
self._attributes = {}
self._name = name
self._offset = offset
self._state = None
async def async_update(self):
"""Get the latest data from the Ruter API."""
await self.ruter.get_departures()
if self.ruter.departures is None:
_LOGGER.error("No data recieved from Ruter.")
return
try:
data = self.ruter.departures[self._offset]
self._state = data['time']
self._attributes['line'] = data['line']
self._attributes['destination'] = data['destination']
except (KeyError, IndexError) as error:
_LOGGER.debug("Error getting data from Ruter, %s", error)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return 'mdi:bus'
@property
def device_state_attributes(self):
"""Return attributes for the sensor."""
return self._attributes
|
MyRookie/SentimentAnalyse
|
refs/heads/master
|
venv/lib/python2.7/site-packages/nltk/tag/brill.py
|
3
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Marcus Uneson <marcus.uneson@gmail.com>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict
from nltk.compat import Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
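# Hedged illustration (not part of NLTK): both features above index into
# (word, tag) pairs, e.g.:
#
#   >>> Word.extract_property([('the', 'DT'), ('dog', 'NN')], 1)
#   'dog'
#   >>> Pos.extract_property([('the', 'DT'), ('dog', 'NN')], 0)
#   'DT'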
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards the end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set([int(tid) for tid in tids])
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, tpl))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
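# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only): a BrillTagger built with an empty
# rule list degenerates to its initial tagger; in practice the rules come
# from a trainer such as nltk.tag.BrillTaggerTrainer.
#
# from nltk.tag import DefaultTagger
# tagger = BrillTagger(DefaultTagger('NN'), rules=[])
# tagger.tag(['the', 'dog'])  # -> [('the', 'NN'), ('dog', 'NN')]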
|
b-me/django
|
refs/heads/master
|
django/conf/locale/fy/formats.py
|
852
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
RTS2/rts2
|
refs/heads/master
|
scripts/u_point/u_model.py
|
3
|
#!/usr/bin/env python3
# (C) 2016, Markus Wildi, wildi.markus@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
'''
Calculate the parameters for the pointing model of J. Condon (1992).
AltAz: Astropy N=0,E=pi/2, Libnova S=0,W=pi/2
'''
__author__ = 'wildi.markus@bluewin.ch'
import sys
import argparse
import logging
import os
import importlib
import numpy as np
import scipy.optimize
import pandas as pd
from astropy import units as u
from astropy.coordinates import SkyCoord,EarthLocation
from astropy.coordinates import Longitude,Latitude,Angle
from astropy.coordinates.representation import SphericalRepresentation
from astropy.utils import iers
# astropy pre 1.2.1 may not work correctly
# wget http://maia.usno.navy.mil/ser7/finals2000A.all
# together with IERS_A_FILE
try:
iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_FILE)
# ###########
except:
print('download:')
print('wget http://maia.usno.navy.mil/ser7/finals2000A.all')
sys.exit(1)
from u_point.structures import Point,Parameter
from u_point.callback import AnnoteFinder
from u_point.callback import AnnotatedPlot
from u_point.script import Script
# find out how caching works
#from astropy.utils import iers
#iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_URL)
class PointingModel(Script):
def __init__(self, lg=None,break_after=None, base_path=None, obs=None,analyzed_positions=None,fit_sxtr=None):
Script.__init__(self,lg=lg,break_after=break_after,base_path=base_path,obs=obs,analyzed_positions=analyzed_positions)
#
self.fit_sxtr=fit_sxtr
self.transform_name=None
self.refraction_method=None
def fetch_coordinates(self,ptfn=None):
self.fetch_positions(sys_exit=True,analyzed=True)
# old irait data do not enable
#self.fetch_mount_meteo(sys_exit=True,analyzed=True,with_nml_id=False)
cats=list()
mnts=list()
imgs=list()
nmls=list()
if len(self.sky_anl)==0:
self.lg.error('fetch_coordinates: nothing to analyze, exiting')
sys.exit(1)
self.eq_mount=False
if self.sky_anl[0].eq_mount:
self.eq_mount=True
self.transform_name=None
if self.sky_anl[0].transform_name:
self.transform_name=self.sky_anl[0].transform_name
self.lg.info('transformation done with: {}'.format(self.transform_name))
self.refraction_method=None
if self.sky_anl[0].refraction_method:
self.refraction_method=self.sky_anl[0].refraction_method
# ToDo
#logger.info('refraction_method: {}'.format(self.refraction_method))
for i,sky in enumerate(self.sky_anl):
if i > self.break_after:
break
if self.fit_sxtr:
if sky.mnt_ll_sxtr is None:
continue
mnt_ll=sky.mnt_ll_sxtr
else:
if sky.mnt_ll_astr is None:
continue
mnt_ll=sky.mnt_ll_astr
cats.append(sky.cat_ll_ap)
mnts.append(mnt_ll)
imgs.append(sky.image_fn)
nmls.append(sky.nml_id)
return cats,mnts,imgs,nmls
# fit projections with a gaussian
def fit_projection_helper(self,function=None, parameters=None, y=None, x = None):
def local_f(params):
for i,p in enumerate(parameters):
p.set(params[i])
return y - function(x)
p = [param() for param in parameters]
return scipy.optimize.leastsq(local_f, p)
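# Hedged illustration (comments only, not part of the script): the
# Parameter/leastsq pattern above in miniature -- fitting y = a*x with a
# single Parameter, assuming Parameter stores a value settable via .set()
# and readable by calling the instance:
#
#   a = Parameter(1.0)
#   res, stat = self.fit_projection_helper(
#       function=lambda x: a() * x, parameters=[a],
#       y=np.array([0., 2., 4.]), x=np.array([0., 1., 2.]))
#   # afterwards a() is ~2.0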
def fit_gaussian(self,x):
return self.height() * np.exp(-(x-self.mu())**2/2./self.sigma()**2) #+ background()
def fit_projection_and_plot(self,vals=None, bins=None, axis=None,fit_title=None,fn_frac=None,prefix=None,plt_no=None,plt=None):
'''
Fit the projections of differences (not corrected data) as well as the residues (lat,lon coordinates).
To compare differences ('1') and residues ('2') the plot titles are labeled with a letter and '1' and '2'.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
cnt_bins, rbins, patches = ax.hist(vals,bins,normed=True, facecolor='green', alpha=0.75)
bins=rbins[:-1] #ToDO why?
# estimator, width
wd=np.sqrt(np.abs(np.sum((bins-cnt_bins)**2*cnt_bins)/np.sum(cnt_bins)))
self.mu=Parameter(0.)
self.sigma=Parameter(wd) #arcsec!
self.height=Parameter(cnt_bins.max())
background=Parameter(0.)
parameters=[self.mu,self.sigma,self.height]# ToDo a bit a contradiction
res,stat=self.fit_projection_helper(function=self.fit_gaussian,parameters=parameters, y=cnt_bins, x=bins)
if stat != 1:
self.lg.warn('fit projection not converged, status: {}'.format(stat))
y=self.fit_gaussian(bins)
l = ax.plot(bins, y, 'r--', linewidth=2)
ax.set_xlabel('{} {} [arcsec]'.format(prefix,axis))
ax.set_ylabel('number of events normalized')
if prefix in 'difference': # ToDo ugly
# as of 2017-03-18 TeX does not work
#ax.set_title(r'{0} {1} {2} $\mu$={3:.2f},$\sigma$={4:.2f} [arcsec] \n catalog_not_corrected - star'.format(plt_no,prefix,axis,self.mu(), self.sigma()))
ax.set_title(r'{0} {1} {2} mu={3:.2f},sigma={4:.2f} [arcsec] \n catalog_not_corrected - star'.format(plt_no,prefix,axis,self.mu(), self.sigma()))
fn_ftmp=fn_frac.replace(' ','_').replace('+','_')
axis_tmp=axis.replace(' ','_').replace('+','_')
fig.savefig(os.path.join(self.base_path,'{}_catalog_not_corrected_projection_{}_{}.png'.format(prefix,axis_tmp,fn_ftmp)))
else:
#ax.set_title(r'{0} {1} {2} $\mu$={3:.2f},$\sigma$={4:.2f} [arcsec], fit: {5}'.format(plt_no,prefix,axis,self.mu(), self.sigma(),fit_title))
ax.set_title(r'{0} {1} {2} mu={3:.2f},sigma={4:.2f} [arcsec], fit: {5}'.format(plt_no,prefix,axis,self.mu(), self.sigma(),fit_title))
#ToDo ugly
fn_ftmp=fn_frac.replace(' ','_').replace('+','_')
axis_tmp=axis.replace(' ','_').replace('+','_')
fig.savefig(os.path.join(self.base_path,'{}_projection_{}_{}.png'.format(prefix,axis_tmp,fn_ftmp)))
def prepare_plot(self, cats=None,mnts=None,imgs=None,nmls=None,selected=None,model=None):
stars=list()
for i, ct in enumerate(cats):
if not i in selected:
#self.lg.debug('star: {} dropped'.format(i))
continue
mt=mnts[i] # readability
mts=mt.represent_as(SphericalRepresentation)
# ToDo may not the end of the story
cts=ct.represent_as(SphericalRepresentation)
df_lon= Longitude(cts.lon.radian-mts.lon.radian,u.radian, wrap_angle=Angle(np.pi,u.radian))
df_lat= Latitude(cts.lat.radian-mts.lat.radian,u.radian)
#print(df_lat,df_lon)
#if df_lat.radian < 0./60./180.*np.pi:
# pass
#elif df_lat.radian > 20./60./180.*np.pi:
# pass
#else:
# continue
# residuum: difference st.cats(fit corrected) - st.star
#
res_lon=Longitude(
float(model.d_lon(cts.lon.radian,cts.lat.radian,cts.lon.radian-mts.lon.radian)),
u.radian,
wrap_angle=Angle(np.pi,u.radian))
res_lat=Latitude(
float(model.d_lat(cts.lon.radian,cts.lat.radian,cts.lat.radian-mts.lat.radian)),u.radian)
try:
image_fn=imgs[i]
except:
image_fn='no image file'
try:
nml_id=nmls[i]
except:
nml_id='no nml_id'
st=Point(
cat_lon=cts.lon,
cat_lat=cts.lat,
mnt_lon=mts.lon,
mnt_lat=mts.lat,
df_lat=df_lat,
df_lon=df_lon,
res_lat=res_lat,
res_lon=res_lon,
image_fn=image_fn,
nml_id=nml_id,
)
stars.append(st)
return stars
def annotate_plot(self,fig=None,ax=None,aps=None,ds9_display=None,delete=None):
af = AnnoteFinder(
ax=ax, # leading plot, put it on the list
aps=aps,
xtol=1.,
ytol=1.,
ds9_display=ds9_display,
lg=self.lg,
annotate_fn=False,
analyzed=True,
delete_one=self.delete_one_position,)
return af
def create_plot(self,fig=None,ax=None,title=None,xlabel=None,ylabel=None,lon=None,lat=None,fn=None):
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.grid(True)
ax.scatter(lon,lat,picker=20)
fig.savefig(os.path.join(self.base_path,fn.replace(' ','_').replace('+','_')))
def plot_results(self, stars=None,args=None):
'''
Plot the differences and residues as a function of lon,lat or dlon,dlat or a combination of them.
'''
import matplotlib
# this varies from distro to distro:
matplotlib.rcParams["backend"] = "TkAgg"
import matplotlib.pyplot as plt
#matplotlib.rc('text', usetex = True)
#matplotlib.rc('font', **{'family':"sans-serif"})
#params = {'text.latex.preamble': [r'\usepackage{siunitx}',
# r'\usepackage{sfmath}', r'\sisetup{detect-family = true}',
# r'\usepackage{amsmath}']}
#plt.rcParams.update(params)
plt.ioff()
fit_title=model.fit_title
if args.fit_plus_poly:
fit_title +='C+PP'
frag='_' + self.transform_name.upper()[0:2]
fit_title += frag
sx='_AS'
if args.fit_sxtr:
sx='_SX'
fit_title += sx
frag= '_'+ self.refraction_method.upper()[0:2]
fit_title += frag
fn_frac=fit_title
if args.fit_plus_poly:
fn_frac+='c_plus_poly'
if self.eq_mount:
lon_label='hour angle'
lat_label='declination'
else:
lat_label='altitude'
lon_label='azimuth N=0,E=90'
if 'nova' in self.transform_name:
lon_label='azimuth S=0,W=90'
az_cat_deg=[x.cat_lon.degree for x in stars]
alt_cat_deg=[x.cat_lat.degree for x in stars]
plots=list()
elements=list()
elements.append('A1 difference: catalog_not_corrected - star')
elements.append('d({}) [arcmin]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=[x.df_lon.arcmin for x in stars]
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_catalog_not_corrected_star{0}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.df_lon.arcmin,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('B1 difference {}: catalog_not_corrected - star'.format(lon_label))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=az_cat_deg
lat=[x.df_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{0}_catalog_not_corrected_star{1}.png'.format(lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.df_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('C1 difference {}: catalog_not_corrected - star'.format(lat_label))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=az_cat_deg
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{1}_catalog_not_corrected_star{2}.png'.format(lon_label,lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('D1 difference {}: catalog_not_corrected - star'.format(lon_label))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=alt_cat_deg
lat=[x.df_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
# ToDo: think about that:
#ax.scatter(alt_cat_deg ,[x.df_lon.arcmin/ np.tan(x.mnt_lat.radian) for x in stars])
elements.append('difference_{0}_d{1}_catalog_not_corrected_star{2}.png'.format(lat_label,lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.df_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('E1 difference {}: catalog_not_corrected - star'.format(lat_label))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=alt_cat_deg
lat=[x.df_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('difference_{0}_d{0}_catalog_not_corrected_star{1}.png'.format(lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.df_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
## residuum, ax05 is below
elements.append('A2 residuum: catalog_corrected - star {}'.format(fit_title))
elements.append('d({}) [arcmin]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=[x.res_lon.arcmin for x in stars]
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_catalog_corrected_star_{}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.res_lon.arcmin,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('B2 residuum {} catalog_corrected - star, fit: {}'.format(lon_label,fit_title))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=az_cat_deg
lat=[x.res_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{0}_catalog_corrected_star_{1}.png'.format(lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.res_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('D2 residuum {} catalog_corrected - star, fit: {}'.format(lon_label,fit_title))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lon_label))
lon=alt_cat_deg
lat=[x.res_lon.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{1}_catalog_corrected_star_{2}.png'.format(lat_label,lon_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.res_lon.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('C2 residuum {} catalog_corrected - star, fit: {}'.format(lat_label,fit_title))
elements.append('{} [deg]'.format(lon_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=az_cat_deg
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{1}_catalog_corrected_star_{2}.png'.format(lon_label,lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('E2 residuum {} catalog_corrected - star, fit: {}'.format(lat_label,fit_title))
elements.append('{} [deg]'.format(lat_label))
elements.append('d({}) [arcmin]'.format(lat_label))
lon=alt_cat_deg
lat=[x.res_lat.arcmin for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('residuum_{0}_d{0}_catalog_corrected_star_{1}.png'.format(lat_label,fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lat.degree,x.res_lat.arcmin,x.image_fn) for x in stars])
plots.append(elements)
elements=list()
elements.append('K measurement locations catalog')
elements.append('{} [deg]'.format(lon_label))
elements.append('{} [deg]'.format(lat_label))
lon=[x.cat_lon.degree for x in stars]
lat=[x.cat_lat.degree for x in stars]
elements.append(lon)
elements.append(lat)
elements.append('measurement_locations_catalog_{0}.png'.format(fn_frac))
elements.append(['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.cat_lat.degree,x.image_fn) for x in stars])
plots.append(elements)
annotes_skycoords=['{0:.1f},{1:.1f}: {2}'.format(x.cat_lon.degree,x.cat_lat.degree,x.image_fn) for x in stars]
figs=list()
axs=list()
aps=list()
nml_ids=[x.nml_id for x in stars]
# aps must be complete: one AnnotatedPlot entry per figure, otherwise the annotation callback misses plots
for elements in plots:
lon=elements[3]
lat=elements[4]
fig = plt.figure()
ax = fig.add_subplot(111)
self.create_plot(fig=fig,ax=ax,title=elements[0],xlabel=elements[1],ylabel=elements[2],lon=lon,lat=lat,fn=elements[5])
figs.append(fig)
axs.append(ax)
# it depends on what is needed:
#annotes=elements[6]
annotes=annotes_skycoords
aps.append(AnnotatedPlot(xx=ax,nml_id=nml_ids,lon=lon,lat=lat,annotes=annotes))
afs=list()
for i,ax in enumerate(axs):
af=self.annotate_plot(fig=figs[i],ax=axs[i],aps=aps,ds9_display=args.ds9_display,delete=args.delete)
# ToDo: removing this list inhibits the callback on all but one plot
afs.append(af)
figs[i].canvas.mpl_connect('button_press_event',af.mouse_event)
if args.delete:
figs[i].canvas.mpl_connect('key_press_event',af.keyboard_event)
# ToDo why that?
#figs[0].canvas.mpl_connect('button_press_event',afs[0].mouse_event)
#if args.delete:
# figs[0].canvas.mpl_connect('key_press_event',afs[0].keyboard_event)
self.fit_projection_and_plot(vals=[x.df_lon.arcsec for x in stars], bins=args.bins,axis='{}'.format(lon_label), fit_title=fit_title,fn_frac=fn_frac,prefix='difference',plt_no='P1',plt=plt)
self.fit_projection_and_plot(vals=[x.df_lat.arcsec for x in stars], bins=args.bins,axis='{}'.format(lat_label),fit_title=fit_title,fn_frac=fn_frac,prefix='difference',plt_no='P2',plt=plt)
self.fit_projection_and_plot(vals=[x.res_lon.arcsec for x in stars],bins=args.bins,axis='{}'.format(lon_label), fit_title=fit_title,fn_frac=fn_frac,prefix='residuum',plt_no='Q1',plt=plt)
self.fit_projection_and_plot(vals=[x.res_lat.arcsec for x in stars],bins=args.bins,axis='{}'.format(lat_label),fit_title=fit_title,fn_frac=fn_frac,prefix='residuum',plt_no='Q2',plt=plt)
plt.show()
def select_stars(self, stars=None):
slctd=list()
drppd=list()
for i,st in enumerate(stars):
#st=Point(
# cat_lon=cat_aa.az,cat_lat=cat_aa.alt,
# mnt_lon=mnt_aa.az,mnt_lat=mnt_aa.alt,
# df_lat=df_alt,df_lon=df_az,
# res_lat=res_alt,res_lon=res_az
dist2 = st.res_lat.radian**2 + st.res_lon.radian**2
if dist2> (30./3600./180.*np.pi)**2:
slctd.append(i)
else:
drppd.append(i)
# ToDo: nothing is dropped yet, set() merely removes duplicate indices
selected=list(set(slctd))
dropped=list(set(drppd))
self.lg.debug('Number of selected stars: {} '.format(len(selected)))
return selected, dropped
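# The selection criterion above in isolation (illustrative sketch): a star
# ends up in the selected list if its squared residuum distance exceeds
# (30 arcsec)^2, expressed in radians:
#
#   threshold2 = (30. / 3600. / 180. * np.pi) ** 2
#   selected = res_lon_rad ** 2 + res_lat_rad ** 2 > threshold2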
# really ugly!
def arg_float(value):
if 'm' in value:
return -float(value[1:])
else:
return float(value)
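# a leading 'm' stands in for the minus sign so that negative values survive
# shell and argparse option handling, e.g. (illustrative):
#   arg_float('m10.')  -> -10.0
#   arg_float('123.3') -> 123.3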
if __name__ == "__main__":
parser= argparse.ArgumentParser(prog=sys.argv[0], description='Fit an AltAz or EQ pointing model')
parser.add_argument('--level', dest='level', default='INFO', help=': %(default)s, debug level')
parser.add_argument('--toconsole', dest='toconsole', action='store_true', default=False, help=': %(default)s, log to console')
parser.add_argument('--break_after', dest='break_after', action='store', default=10000000, type=int, help=': %(default)s, read max. positions, mostly used for debugging')
parser.add_argument('--base-path', dest='base_path', action='store', default='/tmp/u_point/',type=str, help=': %(default)s , directory where images are stored')
parser.add_argument('--analyzed-positions', dest='analyzed_positions', action='store', default='analyzed_positions.anl', help=': %(default)s, already observed positions')
#
parser.add_argument('--obs-longitude', dest='obs_lng', action='store', default=123.2994166666666,type=arg_float, help=': %(default)s [deg], observatory longitude, positive towards East; a negative value is written with a leading m: m10. equals -10.')
parser.add_argument('--obs-latitude', dest='obs_lat', action='store', default=-75.1,type=arg_float, help=': %(default)s [deg], observatory latitude; a negative value is written with a leading m: m10. equals -10.')
parser.add_argument('--obs-height', dest='obs_height', action='store', default=3237.,type=arg_float, help=': %(default)s [m], observatory height above sea level; a negative value is written with a leading m: m10. equals -10.')
#
parser.add_argument('--fit-sxtr', dest='fit_sxtr', action='store_true', default=False, help=': %(default)s, True: fit SExtractor results')
# group model
parser.add_argument('--model-class', dest='model_class', action='store', default='u_upoint', help=': %(default)s, specify your model, see e.g. model/altaz.py')
parser.add_argument('--fit-plus-poly', dest='fit_plus_poly', action='store_true', default=False, help=': %(default)s, True: Condon 1992 with polynomial')
# group plot
parser.add_argument('--plot', dest='plot', action='store_true', default=False, help=': %(default)s, plot results')
parser.add_argument('--bins', dest='bins', action='store', default=40,type=int, help=': %(default)s, number of bins used in the projection histograms')
parser.add_argument('--ds9-display', dest='ds9_display', action='store_true', default=False, help=': %(default)s, inspect image and region with ds9')
parser.add_argument('--delete', dest='delete', action='store_true', default=False, help=': %(default)s, True: clicking a data point followed by keyboard <Delete> deletes the selected analyzed point from the file given by --analyzed-positions')
args=parser.parse_args()
if args.toconsole:
args.level='DEBUG'
if not os.path.exists(args.base_path):
os.makedirs(args.base_path)
pth, fn = os.path.split(sys.argv[0])
filename=os.path.join(args.base_path,'{}.log'.format(fn.replace('.py',''))) # ToDo datetime, name of the script
logformat= '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
logging.basicConfig(filename=filename, level=args.level.upper(), format= logformat)
logger = logging.getLogger()
if args.toconsole:
# http://www.mglerner.com/blog/?p=8
soh = logging.StreamHandler(sys.stdout)
soh.setLevel(args.level)
logger.addHandler(soh)
if not os.path.exists(args.base_path):
os.makedirs(args.base_path)
obs=EarthLocation(lon=float(args.obs_lng)*u.degree, lat=float(args.obs_lat)*u.degree, height=float(args.obs_height)*u.m)
# now load model class
md = importlib.import_module('model.'+args.model_class)
logger.info('model loaded: {}'.format(args.model_class))
# required methods: fit_model, d_lon, d_lat
model=md.Model(lg=logger)
pm= PointingModel(lg=logger,break_after=args.break_after,base_path=args.base_path,obs=obs,analyzed_positions=args.analyzed_positions,fit_sxtr=args.fit_sxtr)
# cat,mnt: AltAz, or HA,dec coordinates
cats,mnts,imgs,nmls=pm.fetch_coordinates()
# check model type, mount type
if pm.eq_mount:
if 'hadec' not in model.model_type():
logger.error('u_model: model: {}, type: {}'.format(args.model_class, model.model_type()))
logger.error('u_model: specify hadec model type, exiting')
sys.exit(1)
else:
if 'altaz' not in model.model_type():
logger.error('u_model: model: {}, type: {}'.format(args.model_class, model.model_type()))
logger.error('u_model: specify altaz model type, exiting')
sys.exit(1)
if cats is None or len(cats)==0:
logger.error('u_model: nothing to analyze, exiting')
sys.exit(1)
selected=list(range(0,len(cats))) # all
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,obs=pm.obs)
else:
try:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,fit_plus_poly=args.fit_plus_poly)
except TypeError as e:
ptfn=pm.expand_base_path(fn=args.analyzed_positions)
logger.error('u_model: presumably empty file: {}, exception: {}, exiting'.format(ptfn,e))
sys.exit(1)
stars=pm.prepare_plot(cats=cats,mnts=mnts,imgs=imgs,nmls=nmls,selected=selected,model=model)
if args.plot:
pm.plot_results(stars=stars,args=args)
# for the moment
sys.exit(1)
selected,dropped=pm.select_stars(stars=stars)
logger.info('number of selected: {}, dropped: {} '.format(len(selected),len(dropped)))
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,obs=pm.obs)
else:
res=model.fit_model(cats=cats,mnts=mnts,selected=selected,fit_plus_poly=args.fit_plus_poly)
stars=pm.prepare_plot(cats=cats,mnts=mnts,nmls=nmls,selected=selected,model=model)
pm.plot_results(stars=stars,args=args)
logger.info('number of selected: {}, dropped: {} '.format(len(selected),len(dropped)))
if pm.eq_mount:
res=model.fit_model(cats=cats,mnts=mnts,selected=dropped,obs=pm.obs)
else:
res=model.fit_model(cats=cats,mnts=mnts,selected=dropped,fit_plus_poly=args.fit_plus_poly)
stars=pm.prepare_plot(cats=cats,mnts=mnts,nmls=nmls,selected=dropped,model=model)
pm.plot_results(stars=stars,args=args)
sys.exit(0)
|
frewsxcv/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/wpttest.py
|
40
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
DEFAULT_TIMEOUT = 10 # seconds
LONG_TIMEOUT = 60 # seconds
import os
import mozinfo
from wptmanifest.parser import atoms
atom_reset = atoms["Reset"]
enabled_tests = set(["testharness", "reftest", "wdspec"])
class Result(object):
def __init__(self, status, message, expected=None, extra=None):
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.expected = expected
self.extra = extra
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
class SubtestResult(object):
def __init__(self, name, status, message, stack=None, expected=None):
self.name = name
if status not in self.statuses:
raise ValueError("Unrecognised status %s" % status)
self.status = status
self.message = message
self.stack = stack
self.expected = expected
def __repr__(self):
return "<%s.%s %s %s>" % (self.__module__, self.__class__.__name__, self.name, self.status)
class TestharnessResult(Result):
default_expected = "OK"
statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class TestharnessSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "TIMEOUT", "NOTRUN"])
class ReftestResult(Result):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class WdspecResult(Result):
default_expected = "OK"
statuses = set(["OK", "ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
class WdspecSubtestResult(SubtestResult):
default_expected = "PASS"
statuses = set(["PASS", "FAIL", "ERROR"])
def get_run_info(metadata_root, product, **kwargs):
if product == "b2g":
return B2GRunInfo(metadata_root, product, **kwargs)
else:
return RunInfo(metadata_root, product, **kwargs)
class RunInfo(dict):
def __init__(self, metadata_root, product, debug, extras=None):
self._update_mozinfo(metadata_root)
self.update(mozinfo.info)
self["product"] = product
if debug is not None:
self["debug"] = debug
elif "debug" not in self:
# Default to release
self["debug"] = False
if extras is not None:
self.update(extras)
def _update_mozinfo(self, metadata_root):
"""Add extra build information from a mozinfo.json file in a parent
directory"""
path = metadata_root
dirs = set()
while path != os.path.expanduser('~'):
if path in dirs:
break
dirs.add(str(path))
path = os.path.split(path)[0]
mozinfo.find_and_update_from_json(*dirs)
class B2GRunInfo(RunInfo):
def __init__(self, *args, **kwargs):
RunInfo.__init__(self, *args, **kwargs)
self["os"] = "b2g"
class Test(object):
result_cls = None
subtest_result_cls = None
test_type = None
def __init__(self, url, inherit_metadata, test_metadata, timeout=DEFAULT_TIMEOUT, path=None,
protocol="http"):
self.url = url
self._inherit_metadata = inherit_metadata
self._test_metadata = test_metadata
self.timeout = timeout
self.path = path
self.environment = {"protocol": protocol, "prefs": self.prefs}
def __eq__(self, other):
return self.id == other.id
@classmethod
def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
timeout = LONG_TIMEOUT if manifest_item.timeout == "long" else DEFAULT_TIMEOUT
return cls(manifest_item.url,
inherit_metadata,
test_metadata,
timeout=timeout,
path=manifest_item.path,
protocol="https" if hasattr(manifest_item, "https") and manifest_item.https else "http")
@property
def id(self):
return self.url
@property
def keys(self):
return tuple()
def _get_metadata(self, subtest=None):
if self._test_metadata is not None and subtest is not None:
return self._test_metadata.get_subtest(subtest)
else:
return self._test_metadata
def itermeta(self, subtest=None):
for metadata in self._inherit_metadata:
yield metadata
if self._test_metadata is not None:
yield self._get_metadata()
if subtest is not None:
subtest_meta = self._get_metadata(subtest)
if subtest_meta is not None:
yield subtest_meta
def disabled(self, subtest=None):
for meta in self.itermeta(subtest):
disabled = meta.disabled
if disabled is not None:
return disabled
return None
@property
def restart_after(self):
for meta in self.itermeta(None):
restart_after = meta.restart_after
if restart_after is not None:
return True
return False
@property
def tags(self):
tags = set()
for meta in self.itermeta():
meta_tags = meta.tags
if atom_reset in meta_tags:
tags = meta_tags.copy()
tags.remove(atom_reset)
else:
tags |= meta_tags
tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
return tags
@property
def prefs(self):
prefs = {}
for meta in self.itermeta():
meta_prefs = meta.prefs
if atom_reset in meta_prefs:
prefs = meta_prefs.copy()
del prefs[atom_reset]
else:
prefs.update(meta_prefs)
return prefs
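# A minimal sketch of the Reset-atom semantics shared by tags and prefs above
# (hypothetical metadata levels): values accumulate across levels unless a
# level carries the Reset atom, which discards everything gathered so far:
#
#   level 1: {'a': '1'}                   -> {'a': '1'}
#   level 2: {'b': '2'}                   -> {'a': '1', 'b': '2'}
#   level 3: {atom_reset: None, 'c': '3'} -> {'c': '3'}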
def expected(self, subtest=None):
if subtest is None:
default = self.result_cls.default_expected
else:
default = self.subtest_result_cls.default_expected
metadata = self._get_metadata(subtest)
if metadata is None:
return default
try:
return metadata.get("expected")
except KeyError:
return default
def __repr__(self):
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.id)
class TestharnessTest(Test):
result_cls = TestharnessResult
subtest_result_cls = TestharnessSubtestResult
test_type = "testharness"
@property
def id(self):
return self.url
class ManualTest(Test):
test_type = "manual"
@property
def id(self):
return self.url
class ReftestTest(Test):
result_cls = ReftestResult
test_type = "reftest"
def __init__(self, url, inherit_metadata, test_metadata, references,
timeout=DEFAULT_TIMEOUT, path=None, viewport_size=None,
dpi=None, protocol="http"):
Test.__init__(self, url, inherit_metadata, test_metadata, timeout, path, protocol)
for _, ref_type in references:
if ref_type not in ("==", "!="):
raise ValueError
self.references = references
self.viewport_size = viewport_size
self.dpi = dpi
@classmethod
def from_manifest(cls,
manifest_test,
inherit_metadata,
test_metadata,
nodes=None,
references_seen=None):
timeout = LONG_TIMEOUT if manifest_test.timeout == "long" else DEFAULT_TIMEOUT
if nodes is None:
nodes = {}
if references_seen is None:
references_seen = set()
url = manifest_test.url
node = cls(manifest_test.url,
inherit_metadata,
test_metadata,
[],
timeout=timeout,
path=manifest_test.path,
viewport_size=manifest_test.viewport_size,
dpi=manifest_test.dpi,
protocol="https" if hasattr(manifest_test, "https") and manifest_test.https else "http")
nodes[url] = node
for ref_url, ref_type in manifest_test.references:
comparison_key = (ref_type,) + tuple(sorted([url, ref_url]))
if ref_url in nodes:
manifest_node = ref_url
if comparison_key in references_seen:
# We have reached a cycle so stop here
# Note that just seeing a node for the second time is not
# enough to detect a cycle because
# A != B != C != A must include C != A
# but A == B == A should not include the redundant B == A.
continue
references_seen.add(comparison_key)
manifest_node = manifest_test.manifest.get_reference(ref_url)
if manifest_node:
reference = ReftestTest.from_manifest(manifest_node,
[],
None,
nodes,
references_seen)
else:
reference = ReftestTest(ref_url, [], None, [])
node.references.append((reference, ref_type))
return node
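# A small illustration of the cycle key used above (hypothetical URLs): the
# comparison key is symmetric in the two URLs, so "a != b" and "b != a"
# yield the same key and the second occurrence is skipped:
#
#   ("!=",) + tuple(sorted(["/a.html", "/b.html"]))
#     == ("!=",) + tuple(sorted(["/b.html", "/a.html"]))
#     == ("!=", "/a.html", "/b.html")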
@property
def id(self):
return self.url
@property
def keys(self):
return ("reftype", "refurl")
class WdspecTest(Test):
result_cls = WdspecResult
subtest_result_cls = WdspecSubtestResult
test_type = "wdspec"
manifest_test_cls = {"reftest": ReftestTest,
"testharness": TestharnessTest,
"manual": ManualTest,
"wdspec": WdspecTest}
def from_manifest(manifest_test, inherit_metadata, test_metadata):
test_cls = manifest_test_cls[manifest_test.item_type]
return test_cls.from_manifest(manifest_test, inherit_metadata, test_metadata)
|
mapnik/mapnik
|
refs/heads/master
|
scons/scons-local-4.1.0/SCons/Tool/packaging/rpm.py
|
2
|
"""SCons.Tool.Packaging.rpm
The rpm packager.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Builder
import SCons.Tool.rpmutils
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
**kw):
# initialize the rpm tool
SCons.Tool.Tool('rpm').generate(env)
bld = env['BUILDERS']['Rpm']
# Generate a UserError whenever the target name has been set explicitly,
# since rpm does not allow for controlling it. This is detected by
# checking if the target has been set to the default by the Package()
# Environment function.
if str(target[0])!="%s-%s"%(NAME, VERSION):
raise UserError( "Setting target is not supported for rpm." )
else:
# Deduce the build architecture, but allow it to be overridden
# by setting ARCHITECTURE in the construction env.
buildarchitecture = SCons.Tool.rpmutils.defaultMachine()
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
fmt = '%s-%s-%s.%s.rpm'
srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)
target = [ srcrpm, binrpm ]
# get the correct arguments into the kw hash
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# if no "SOURCE_URL" tag is given add a default one.
if 'SOURCE_URL' not in kw:
kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
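# e.g. for a (hypothetical) target[0] of 'foo-1.2-3.src.rpm' the default
# SOURCE_URL becomes 'foo-1.2-3.src.tar.gz'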
# mangle the source and target list for the rpmbuild
env = OverrideEnvironment(env, kw)
target, source = stripinstallbuilder(target, source, env)
target, source = addspecfile(target, source, env)
target, source = collectintargz(target, source, env)
# now call the rpm builder to actually build the packet.
return bld(env, target, source, **kw)
def collectintargz(target, source, env):
""" Puts all source files into a tar.gz file. """
# the rpm tool depends on a source package; until this is changed,
# this hack has to stay here to pack all sources into one tarball.
sources = env.FindSourceFiles()
# filter out the target we are building the source list for.
sources = [s for s in sources if s not in target]
# find the .spec file for rpm and add it since it is not necessarily found
# by the FindSourceFiles function.
sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
# sort to keep sources from changing order across builds
sources.sort()
# as the source contains the url of the source package this rpm package
# is built from, we extract the target name
tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
try:
tarball = env['SOURCE_URL'].split('/')[-1]
except KeyError as e:
raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
tarball = src_targz.package(env, source=sources, target=tarball,
PACKAGEROOT=env['PACKAGEROOT'], )
return (target, tarball)
def addspecfile(target, source, env):
specfile = "%s-%s" % (env['NAME'], env['VERSION'])
bld = SCons.Builder.Builder(action = build_specfile,
suffix = '.spec',
target_factory = SCons.Node.FS.File)
source.extend(bld(env, specfile, source))
return (target,source)
def build_specfile(target, source, env):
""" Builds a RPM specfile from a dictionary with string metadata and
by analyzing a tree of nodes.
"""
with open(target[0].get_abspath(), 'w') as ofp:
try:
ofp.write(build_specfile_header(env))
ofp.write(build_specfile_sections(env))
ofp.write(build_specfile_filesection(env, source))
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError as e:
raise SCons.Errors.UserError('"%s" package field for RPM is missing.' % e.args[0])
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
""" Builds the sections of a rpm specfile.
"""
str = ""
mandatory_sections = {
'DESCRIPTION' : '\n%%description\n%s\n\n', }
str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
optional_sections = {
'DESCRIPTION_' : '%%description -l %s\n%s\n\n',
'CHANGELOG' : '%%changelog\n%s\n\n',
'X_RPM_PREINSTALL' : '%%pre\n%s\n\n',
'X_RPM_POSTINSTALL' : '%%post\n%s\n\n',
'X_RPM_PREUNINSTALL' : '%%preun\n%s\n\n',
'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
'X_RPM_VERIFY' : '%%verify\n%s\n\n',
# These are for internal use but could possibly be overridden
'X_RPM_PREP' : '%%prep\n%s\n\n',
'X_RPM_BUILD' : '%%build\n%s\n\n',
'X_RPM_INSTALL' : '%%install\n%s\n\n',
'X_RPM_CLEAN' : '%%clean\n%s\n\n',
}
# Default prep, build, install and clean rules
# TODO: optimize those build steps, to not compile the project a second time
if 'X_RPM_PREP' not in spec:
spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
if 'X_RPM_BUILD' not in spec:
spec['X_RPM_BUILD'] = '[ ! -e "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && mkdir "$RPM_BUILD_ROOT"'
if 'X_RPM_INSTALL' not in spec:
spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
if 'X_RPM_CLEAN' not in spec:
spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
return str
def build_specfile_header(spec):
""" Builds all sections but the %file of a rpm specfile
"""
str = ""
# first the mandatory sections
mandatory_header_fields = {
'NAME' : '%%define name %s\nName: %%{name}\n',
'VERSION' : '%%define version %s\nVersion: %%{version}\n',
'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
'X_RPM_GROUP' : 'Group: %s\n',
'SUMMARY' : 'Summary: %s\n',
'LICENSE' : 'License: %s\n',
}
str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )
# now the optional tags
optional_header_fields = {
'VENDOR' : 'Vendor: %s\n',
'X_RPM_URL' : 'Url: %s\n',
'SOURCE_URL' : 'Source: %s\n',
'SUMMARY_' : 'Summary(%s): %s\n',
'ARCHITECTURE' : 'BuildArch: %s\n',
'X_RPM_DISTRIBUTION' : 'Distribution: %s\n',
'X_RPM_ICON' : 'Icon: %s\n',
'X_RPM_PACKAGER' : 'Packager: %s\n',
'X_RPM_GROUP_' : 'Group(%s): %s\n',
'X_RPM_REQUIRES' : 'Requires: %s\n',
'X_RPM_PROVIDES' : 'Provides: %s\n',
'X_RPM_CONFLICTS' : 'Conflicts: %s\n',
'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
'X_RPM_SERIAL' : 'Serial: %s\n',
'X_RPM_EPOCH' : 'Epoch: %s\n',
'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\n',
'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\n',
'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
'X_RPM_PREFIX' : 'Prefix: %s\n',
# internal use
'X_RPM_BUILDROOT' : 'BuildRoot: %s\n',
}
# fill in default values:
# Adding a BuildRequires renders the .rpm unbuildable under systems which
# are not managed by rpm, since the database to resolve this dependency is
# missing (take Gentoo as an example)
#if 'X_RPM_BUILDREQUIRES' not in spec:
# spec['X_RPM_BUILDREQUIRES'] = 'scons'
if 'X_RPM_BUILDROOT' not in spec:
spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'
str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
# Add any extra specfile definitions the user may have supplied.
# These flags get no processing, they are just added.
# github #3164: if we don't turn off debug package generation
# the tests which build packages all fail. If there are no
# extra flags, default to adding this one. If the user wants
# to turn this back on, supply the flag set to None.
if 'X_RPM_EXTRADEFS' not in spec:
spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']
for extra in spec['X_RPM_EXTRADEFS']:
str += extra + '\n'
return str
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
""" builds the %file section of the specfile
"""
str = '%files\n'
if 'X_RPM_DEFATTR' not in spec:
spec['X_RPM_DEFATTR'] = '(-,root,root)'
str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
supported_tags = {
'PACKAGING_CONFIG' : '%%config %s',
'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
'PACKAGING_DOC' : '%%doc %s',
'PACKAGING_UNIX_ATTR' : '%%attr %s',
'PACKAGING_LANG_' : '%%lang(%s) %s',
'PACKAGING_X_RPM_VERIFY' : '%%verify %s',
'PACKAGING_X_RPM_DIR' : '%%dir %s',
'PACKAGING_X_RPM_DOCDIR' : '%%docdir %s',
'PACKAGING_X_RPM_GHOST' : '%%ghost %s', }
for file in files:
# build the tagset
tags = {}
for k in supported_tags.keys():
try:
v = file.GetTag(k)
if v:
tags[k] = v
except AttributeError:
pass
# compile the tagset
str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )
str = str + ' '
str = str + file.GetTag('PACKAGING_INSTALL_LOCATION')
str = str + '\n\n'
return str
class SimpleTagCompiler:
""" Compile RPM tags by doing simple string substitution.
The replacement specification is stored in the *tagset* dictionary,
something like::
{"abc" : "cdef %s ", "abc_": "cdef %s %s"}
The :func:`compile` function gets a value dictionary, which may look like::
{"abc": "ghij", "abc_gh": "ij"}
The resulting string will be::
"cdef ghij cdef gh ij"
"""
def __init__(self, tagset, mandatory=1):
self.tagset = tagset
self.mandatory = mandatory
def compile(self, values):
""" Compiles the tagset and returns a str containing the result
"""
def is_international(tag):
return tag.endswith('_')
def get_country_code(tag):
return tag[-2:]
def strip_country_code(tag):
return tag[:-2]
replacements = list(self.tagset.items())
str = ""
domestic = [t for t in replacements if not is_international(t[0])]
for key, replacement in domestic:
try:
str = str + replacement % values[key]
except KeyError as e:
if self.mandatory:
raise e
international = [t for t in replacements if is_international(t[0])]
for key, replacement in international:
try:
x = [t for t in values.items() if strip_country_code(t[0]) == key]
int_values_for_key = [(get_country_code(t[0]),t[1]) for t in x]
for v in int_values_for_key:
str = str + replacement % v
except KeyError as e:
if self.mandatory:
raise e
return str
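# A minimal usage sketch of SimpleTagCompiler, reusing the values from the
# class docstring above:
#
#   compiler = SimpleTagCompiler({"abc" : "cdef %s ", "abc_": "cdef %s %s"})
#   compiler.compile({"abc": "ghij", "abc_gh": "ij"})
#   # -> "cdef ghij cdef gh ij"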
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Precis/Diamond
|
refs/heads/master
|
src/collectors/nagios/test/testnagios.py
|
31
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from nagios import NagiosStatsCollector
##########################################################################
class TestNagiosStatsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NagiosStatsCollector', {
'interval': 10,
'bin': 'true',
'use_sudo': False
})
self.collector = NagiosStatsCollector(config, None)
def test_import(self):
self.assertTrue(NagiosStatsCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('nagiostat').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
metrics = {
'AVGACTHSTLAT': 196,
'AVGACTSVCLAT': 242,
'AVGACTHSTEXT': 4037,
'AVGACTSVCEXT': 340,
'NUMHSTUP': 63,
'NUMHSTDOWN': 0,
'NUMHSTUNR': 0,
'NUMSVCOK': 1409,
'NUMSVCWARN': 3,
'NUMSVCUNKN': 0,
'NUMSVCCRIT': 7,
'NUMHSTACTCHK5M': 56,
'NUMHSTPSVCHK5M': 0,
'NUMSVCACTCHK5M': 541,
'NUMSVCPSVCHK5M': 0,
'NUMACTHSTCHECKS5M': 56,
'NUMOACTHSTCHECKS5M': 1,
'NUMCACHEDHSTCHECKS5M': 1,
'NUMSACTHSTCHECKS5M': 55,
'NUMPARHSTCHECKS5M': 55,
'NUMSERHSTCHECKS5M': 0,
'NUMPSVHSTCHECKS5M': 0,
'NUMACTSVCCHECKS5M': 1101,
'NUMOACTSVCCHECKS5M': 0,
'NUMCACHEDSVCCHECKS5M': 0,
'NUMSACTSVCCHECKS5M': 1101,
'NUMPSVSVCCHECKS5M': 0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
aidanlister/django
|
refs/heads/master
|
django/templatetags/cache.py
|
471
|
from __future__ import unicode_literals
from django.core.cache import InvalidCacheBackendError, caches
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Library, Node, TemplateSyntaxError, VariableDoesNotExist,
)
register = Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on, cache_name):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
self.cache_name = cache_name
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
if self.cache_name:
try:
cache_name = self.cache_name.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.cache_name.var)
try:
fragment_cache = caches[cache_name]
except InvalidCacheBackendError:
raise TemplateSyntaxError('Invalid cache name specified for cache tag: %r' % cache_name)
else:
try:
fragment_cache = caches['template_fragments']
except InvalidCacheBackendError:
fragment_cache = caches['default']
vary_on = [var.resolve(context) for var in self.vary_on]
cache_key = make_template_fragment_key(self.fragment_name, vary_on)
value = fragment_cache.get(cache_key)
if value is None:
value = self.nodelist.render(context)
fragment_cache.set(cache_key, value, expire_time)
return value
@register.tag('cache')
def do_cache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cache %}
{% cache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcache %}
This tag also supports varying by a list of arguments::
{% load cache %}
{% cache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcache %}
Optionally the cache to use may be specified thus::
{% cache .... using="cachename" %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
if len(tokens) > 3 and tokens[-1].startswith('using='):
cache_name = parser.compile_filter(tokens[-1][len('using='):])
tokens = tokens[:-1]
else:
cache_name = None
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
cache_name,
)
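# A minimal template-side sketch matching the docstring above (fragment name
# and vary-on variable are hypothetical):
#
#   {% load cache %}
#   {% cache 600 sidebar request.user.username %}
#       ... sidebar rendered per user, cached for 600 seconds ...
#   {% endcache %}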
|
jdesgats/mongrel2
|
refs/heads/master
|
examples/chat/www.py
|
99
|
import web
import random
urls = (
'/(.*)', 'hello'
)
app = web.application(urls, globals())
class hello:
def GET(self, name):
if not name:
name = 'World'
data = 'Hello, ' + name + '!'
return data * random.randint(1, 1000)
if __name__ == "__main__":
app.run()
|
uiri/pxqz
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/formtools/tests/wizard/wizardtests/tests.py
|
78
|
from __future__ import with_statement
import os
from django import forms
from django.test import TestCase
from django.test.client import RequestFactory
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import CookieWizardView
from django.contrib.formtools.tests.wizard.forms import UserForm, UserFormSet
class WizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_initial_call(self):
response = self.client.get(self.wizard_url)
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
def test_form_post_error(self):
response = self.client.post(self.wizard_url, self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': [u'This field is required.'],
'user': [u'This field is required.']})
def test_form_post_success(self):
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, {
'wizard_goto_step': response.context['wizard']['steps'].prev})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_template_context(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context.get('another_var', None), None)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
self.assertEqual(response.context.get('another_var', None), True)
def test_form_finish(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['form_list']
self.assertEqual(all_data[1]['file1'].read(), open(__file__).read())
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': u'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': u'123 Main St', 'address2': u'Djangoland'},
{'random_crap': u'blah blah'},
[{'random_crap': u'blah blah'},
{'random_crap': u'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
self.assertEqual(all_data['file1'].read(), open(__file__).read())
del all_data['file1']
self.assertEqual(all_data, {
'name': u'Pony', 'thirsty': True, 'user': self.testuser,
'address1': u'123 Main St', 'address2': u'Djangoland',
'random_crap': u'blah blah', 'formset-form4': [
{'random_crap': u'blah blah'},
{'random_crap': u'blah blah'}]})
def test_manipulated_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_refresh(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__)
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
class SessionWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_session/'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
class CookieWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_cookie/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
class WizardTestKwargs(TestCase):
wizard_url = '/wiz_other_template/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_template(self):
templates = os.path.join(os.path.dirname(__file__), 'templates')
with self.settings(
TEMPLATE_DIRS=list(settings.TEMPLATE_DIRS) + [templates]):
response = self.client.get(self.wizard_url)
self.assertTemplateUsed(response, 'other_wizard_form.html')
class WizardTestGenericViewInterface(TestCase):
def test_get_context_data_inheritance(self):
class TestWizard(CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEquals(response.context_data['test_key'], 'test_value')
def test_get_context_data_with_mixin(self):
class AnotherMixin(object):
def get_context_data(self, **kwargs):
context = super(AnotherMixin, self).get_context_data(**kwargs)
context['another_key'] = 'another_value'
return context
class TestWizard(AnotherMixin, CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEquals(response.context_data['test_key'], 'test_value')
self.assertEquals(response.context_data['another_key'], 'another_value')
class WizardFormKwargsOverrideTests(TestCase):
def setUp(self):
super(WizardFormKwargsOverrideTests, self).setUp()
self.rf = RequestFactory()
# Create two users so we can filter by is_staff when handing our
# wizard a queryset keyword argument.
self.normal_user = User.objects.create(username='test1', email='normal@example.com')
self.staff_user = User.objects.create(username='test2', email='staff@example.com', is_staff=True)
def test_instance_is_maintained(self):
self.assertEqual(2, User.objects.count())
queryset = User.objects.get(pk=self.staff_user.pk)
class InstanceOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'instance': queryset}
view = InstanceOverrideWizard.as_view([UserForm])
response = view(self.rf.get('/'))
form = response.context_data['wizard']['form']
self.assertNotEqual(form.instance.pk, None)
self.assertEqual(form.instance.pk, self.staff_user.pk)
self.assertEqual('staff@example.com', form.initial.get('email', None))
def test_queryset_is_maintained(self):
queryset = User.objects.filter(pk=self.staff_user.pk)
class QuerySetOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'queryset': queryset}
view = QuerySetOverrideWizard.as_view([UserFormSet])
response = view(self.rf.get('/'))
formset = response.context_data['wizard']['form']
self.assertNotEqual(formset.queryset, None)
self.assertEqual(formset.initial_form_count(), 1)
self.assertEqual(['staff@example.com'],
list(formset.queryset.values_list('email', flat=True)))
|
kewisch/python-crucible
|
refs/heads/master
|
src/modules/crucible/rest.py
|
2
|
import urllib
import urllib2
import json
import functools
def buildUrl(url, params = []):
if len(params) > 0:
url += '?'
first = True
for key, value in params:
if first:
first = False
else:
url += '&'
url += urllib.quote(key) + '=' + urllib.quote(str(value))
return url
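# e.g. (hypothetical endpoint):
#   buildUrl('http://host/rest', [('q', 'a b'), ('limit', 10)])
#   -> 'http://host/rest?q=a%20b&limit=10'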
class UrlOpenFactory(object):
@property
def httpParams(self):
# we have to send something... so why not json?
return {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
def createRequest(self, url, data = None):
return urllib2.Request(url, data, self.httpParams)
def urlopen(self, url, data = None):
return urllib2.urlopen(self.createRequest(url, data)).read()
class JsonUrlOpenFactory(UrlOpenFactory):
@property
def httpParams(self):
return {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
def urlopen(self, url, data = None):
return json.loads(super(JsonUrlOpenFactory, self).urlopen(url, json.dumps(data) if not data is None else None))
def dumpHttpError(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except urllib2.HTTPError as e:
with open('httpError', 'w') as out:
out.write('\n'.join(e.read().split('\\n')))
raise e
return wrapper
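# A minimal usage sketch (hypothetical URL): decorate any call that may raise
# urllib2.HTTPError so the server's error body is dumped to ./httpError:
#
#   @dumpHttpError
#   def fetchReviews(factory):
#       return factory.urlopen('http://crucible.example/rest-service/reviews-v1')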
|
Donkyhotay/MoonPy
|
refs/heads/master
|
twisted/test/test_import.py
|
61
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.python.runtime import platformType
class AtLeastImportTestCase(unittest.TestCase):
"""I test that there are no syntax errors which will not allow importing.
"""
failureException = ImportError
def test_misc(self):
"""Test importing other misc. modules
"""
from twisted import copyright
def test_persisted(self):
"""Test importing persisted
"""
from twisted.persisted import dirdbm
from twisted.persisted import styles
def test_internet(self):
"""Test importing internet
"""
from twisted.internet import tcp
from twisted.internet import main
# from twisted.internet import ssl
from twisted.internet import abstract
from twisted.internet import udp
from twisted.internet import protocol
from twisted.internet import defer
def test_unix(self):
"""internet modules for unix."""
from twisted.internet import stdio
from twisted.internet import process
from twisted.internet import unix
if platformType != "posix":
test_unix.skip = "UNIX-only modules"
def test_spread(self):
"""Test importing spreadables
"""
from twisted.spread import pb
from twisted.spread import jelly
from twisted.spread import banana
from twisted.spread import flavors
def test_twistedPython(self):
"""Test importing twisted.python
"""
from twisted.python import hook
from twisted.python import log
from twisted.python import reflect
# from twisted.python import threadable
# from twisted.python import threadpool
from twisted.python import usage
from twisted.python import otp
def test_protocols(self):
"""Test importing protocols
"""
from twisted.protocols import basic
from twisted.protocols import ftp
from twisted.protocols import telnet
from twisted.protocols import policies
def test_enterprise(self):
from twisted.enterprise import adbapi
from twisted.enterprise import reflector
from twisted.enterprise import sqlreflector
from twisted.enterprise import row
|
datenbetrieb/odoo
|
refs/heads/8.0
|
addons/account_anglo_saxon/stock.py
|
57
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class stock_move(osv.Model):
_inherit = "stock.move"
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
""" Add a reference to the stock.move in the invoice line
In anglo-saxon accounting the price for COGS should be taken from the stock.move
if possible (falling back on standard_price)
"""
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
res.update({
'move_id': move.id,
})
return res
class stock_picking(osv.osv):
_inherit = "stock.picking"
_description = "Picking List"
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
'''Return ids of created invoices for the pickings'''
res = super(stock_picking,self).action_invoice_create(cr, uid, ids, journal_id, group, type, context=context)
if type in ('in_invoice', 'in_refund'):
for inv in self.pool.get('account.invoice').browse(cr, uid, res, context=context):
for ol in inv.invoice_line:
if ol.product_id.type != 'service':
oa = ol.product_id.property_stock_account_input and ol.product_id.property_stock_account_input.id
if not oa:
oa = ol.product_id.categ_id.property_stock_account_input_categ and ol.product_id.categ_id.property_stock_account_input_categ.id
if oa:
fpos = ol.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
tax_line = ol.invoice_line_tax_id.filtered(lambda l: not l.account_collected_id) if ol.invoice_line_tax_id else False
if tax_line:
for tax in tax_line:
tax_id = self.pool['account.invoice.tax'].search(cr, uid, [('invoice_id', '=', ol.invoice_id.id), ('name', '=', tax.name), ('account_id', '=', ol.account_id.id)], limit=1)
self.pool['account.invoice.tax'].write(cr, uid, tax_id, {'account_id': a})
self.pool.get('account.invoice.line').write(cr, uid, [ol.id], {'account_id': a})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
benoitsteiner/tensorflow-opencl
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/summaries_test.py
|
73
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.slim.summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
from tensorflow.contrib.slim.python.slim import summaries
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary import summary_iterator
class SummariesTest(test.TestCase):
def safe_create(self, output_dir):
if gfile.Exists(output_dir):
gfile.DeleteRecursively(output_dir)
gfile.MakeDirs(output_dir)
def assert_scalar_summary(self, output_dir, names_to_values):
"""Asserts that the given output directory contains written summaries.
Args:
output_dir: The output directory in which to look for event files.
names_to_values: A dictionary of summary names to values.
"""
# The events file may have additional entries, e.g. the event version
# stamp, so we have to parse things a bit.
output_filepath = glob.glob(os.path.join(output_dir, '*'))
self.assertEqual(len(output_filepath), 1)
events = summary_iterator.summary_iterator(output_filepath[0])
summaries_list = [e.summary for e in events if e.summary.value]
values = []
for item in summaries_list:
for value in item.value:
values.append(value)
saved_results = {v.tag: v.simple_value for v in values}
for name in names_to_values:
self.assertAlmostEqual(names_to_values[name], saved_results[name])
def testScalarSummaryIsPartOfCollectionWithNoPrint(self):
tensor = array_ops.ones([]) * 3
name = 'my_score'
prefix = 'eval'
op = summaries.add_scalar_summary(tensor, name, prefix, print_summary=False)
self.assertTrue(op in ops.get_collection(ops.GraphKeys.SUMMARIES))
def testScalarSummaryIsPartOfCollectionWithPrint(self):
tensor = array_ops.ones([]) * 3
name = 'my_score'
prefix = 'eval'
op = summaries.add_scalar_summary(tensor, name, prefix, print_summary=True)
self.assertTrue(op in ops.get_collection(ops.GraphKeys.SUMMARIES))
def verify_scalar_summary_is_written(self, print_summary):
value = 3
tensor = array_ops.ones([]) * value
name = 'my_score'
prefix = 'eval'
summaries.add_scalar_summary(tensor, name, prefix, print_summary)
output_dir = os.path.join(self.get_temp_dir(),
'scalar_summary_no_print_test')
self.safe_create(output_dir)
summary_op = summary.merge_all()
summary_writer = summary.FileWriter(output_dir)
with self.test_session() as sess:
new_summary = sess.run(summary_op)
summary_writer.add_summary(new_summary, 1)
summary_writer.flush()
self.assert_scalar_summary(output_dir, {
'%s/%s' % (prefix, name): value
})
def testScalarSummaryIsWrittenWithNoPrint(self):
self.verify_scalar_summary_is_written(print_summary=False)
def testScalarSummaryIsWrittenWithPrint(self):
self.verify_scalar_summary_is_written(print_summary=True)
if __name__ == '__main__':
test.main()
|
elopezga/ErrorRate
|
refs/heads/master
|
ivi/agilent/agilentDSOX3032A.py
|
7
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent3000A import *
class agilentDSOX3032A(agilent3000A):
"Agilent InfiniiVision DSOX3032A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO-X 3032A')
super(agilentDSOX3032A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 350e6
self._init_channels()
|
SummerLW/Perf-Insight-Report
|
refs/heads/test
|
telemetry/telemetry/internal/backends/android_app_backend_unittest.py
|
7
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import mock
import unittest
from telemetry.internal.backends import android_app_backend
from devil.android.sdk import intent as intent_module
_FakeAndroidProcess = collections.namedtuple(
'AndroidProcess', ['app_backend', 'pid', 'name'])
class AndroidAppBackendUnittest(unittest.TestCase):
def setUp(self):
self.platform_backend = mock.Mock()
self.start_intent = intent_module.Intent(
package='com.example.my_app',
activity='com.example.my_app.LaunchMyApp')
self.app_backend = android_app_backend.AndroidAppBackend(
self.platform_backend, self.start_intent)
@mock.patch('telemetry.internal.backends.android_app_backend'
'.android_process.AndroidProcess', _FakeAndroidProcess)
def testGetProcesses(self):
# Only processes belonging to 'com.example.my_app' should match.
self.platform_backend.GetPsOutput.return_value = [
['1111', 'com.example.my_app'],
['2222', 'com.example.my_appointments_helper'],
['3333', 'com.example.my_app:service'],
['4444', 'com.example.some_other_app'],
['5555', 'com_example_my_app'],
]
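    # Only the exact package name and its ':service' subprocess should match;
    # substring and underscore look-alikes must be filtered out.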
process_pids = set(p.pid for p in self.app_backend.GetProcesses())
    self.assertEqual(process_pids, set(['1111', '3333']))
|
amanharitsh123/zulip
|
refs/heads/master
|
zerver/migrations/0103_remove_userprofile_muted_topics.py
|
3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 00:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0102_convert_muted_topic'),
]
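    # The preceding migration (0102_convert_muted_topic) moved the data out
    # of this field, so the old column can be dropped safely.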
operations = [
migrations.RemoveField(
model_name='userprofile',
name='muted_topics',
),
]
|
seanli9jan/tensorflow
|
refs/heads/master
|
tensorflow/contrib/data/python/ops/iterator_ops.py
|
19
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.util import deprecation
@deprecation.deprecated(
None, "Use `tf.data.experimental.make_saveable_from_iterator(...)`.")
def make_saveable_from_iterator(iterator):
"""Returns a SaveableObject for saving/restore iterator state using Saver.
Args:
iterator: Iterator.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.contrib.data.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
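  Returns:
    A SaveableObject for saving/restoring iterator state using Saver.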
"""
return iterator_ops.make_saveable_from_iterator(iterator)
class CheckpointInputPipelineHook(iterator_ops.CheckpointInputPipelineHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
This could potentially avoid overfitting in certain pipelines where the
number of training steps per eval are small compared to the dataset
size or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.contrib.data.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
`tf.contrib.data.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
collector when building the eval graph.
"""
@deprecation.deprecated(
None, "Use `tf.data.experimental.CheckpointInputPipelineHook(...)`.")
def __init__(self, estimator):
super(CheckpointInputPipelineHook, self).__init__(estimator)
|
rhelmer/socorro-lib
|
refs/heads/master
|
socorro/external/crash_data_base.py
|
10
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.external import (
MissingArgumentError,
ResourceNotFound,
ResourceUnavailable,
ServiceUnavailable
)
from socorro.external.crashstorage_base import CrashIDNotFound
from socorro.lib import external_common
class CrashDataBase(object):
"""
Common implementation of the crash data service for all crashstorage
schemes. Any external service that wants to implement a CrashData service
may subclass from this service. All they'd have to do is implement the
'get_storage' method to return an appropriate instance of their own
crashstorage class.
"""
def __init__(self, *args, **kwargs):
super(CrashDataBase, self).__init__()
self.config = kwargs['config']
self.all_services = kwargs['all_services']
def get_storage(self):
"""derived classes must implement this method to return an instance
of their own crashstorage class"""
raise NotImplementedError
def get(self, **kwargs):
"""Return JSON data of a crash report, given its uuid. """
filters = [
('uuid', None, 'str'),
('datatype', None, 'str'),
('name', None, 'str') # only applicable if datatype == 'raw'
]
params = external_common.parse_arguments(filters, kwargs)
if not params.uuid:
raise MissingArgumentError('uuid')
if not params.datatype:
raise MissingArgumentError('datatype')
# get a generic crashstorage instance from whatever external resource
# is implementing this service.
store = self.get_storage()
datatype_method_mapping = {
'raw': 'get_raw_dump',
'meta': 'get_raw_crash',
'processed': 'get_processed',
'unredacted': 'get_unredacted_processed',
}
        get = getattr(store, datatype_method_mapping[params.datatype])
try:
if params.datatype == 'raw':
return (
get(params.uuid, name=params.name),
'application/octet-stream'
)
else:
return get(params.uuid)
except CrashIDNotFound:
if params.datatype in ('processed', 'unredacted'):
# try to fetch a raw crash just to ensure that the raw crash
# exists. If this line fails, there's no reason to actually
# submit the priority job.
try:
store.get_raw_crash(params.uuid)
except CrashIDNotFound:
raise ResourceNotFound(params.uuid)
# search through the existing other services to find the
# Priorityjob service.
try:
priorityjob_service_impl = self.all_services[
'Priorityjobs'
]
except KeyError:
raise ServiceUnavailable('Priorityjobs')
# get the underlying implementation of the Priorityjob
# service and instantiate it.
priority_job_service = priorityjob_service_impl.cls(
config=self.config
)
                # create the priority job for this crash_id
priority_job_service.create(uuid=params.uuid)
raise ResourceUnavailable(params.uuid)
raise ResourceNotFound(params.uuid)
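# A minimal sketch (an addition, not part of the original module) showing
# the intended subclassing pattern: a concrete service only has to return
# its own crashstorage instance from 'get_storage'. 'InMemoryCrashStorage'
# is a hypothetical stand-in, not a real socorro class.
class InMemoryCrashStorage(object):
    """Hypothetical crashstorage that keeps raw crashes in a dict."""
    def __init__(self, config):
        self.config = config
        self._raw_crashes = {}
    def get_raw_crash(self, uuid):
        try:
            return self._raw_crashes[uuid]
        except KeyError:
            raise CrashIDNotFound(uuid)
class InMemoryCrashData(CrashDataBase):
    """Crash data service backed by the in-memory store above."""
    def get_storage(self):
        return InMemoryCrashStorage(config=self.config)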
|
RyanYoung25/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/depthwise_conv_op_test.py
|
1
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 35, 35, 2],
[4, 147, 147, 24], [3, 299, 299, 3]]
filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [5, 5, 2, 1],
[3, 3, 24, 8], [1, 1, 3, 8]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 11, 11, 2],
[4, 74, 74, 192], [3, 299, 299, 24]]
strides = [1, 1, 1, 3, 2, 1]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, SAME, SAME, VALID, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
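# A reference implementation added here as a sketch (not part of the
# original test): depthwise convolution applies each of the
# `depth_multiplier` filters of every input channel independently, so
# output channel k * dm + q is produced from input channel k and filter
# slice [:, :, k, q]. VALID padding and stride 1 only.
def _ReferenceDepthwiseConv2D(x, f):
  """Naive VALID/stride-1 depthwise conv; x: [1, H, W, C], f: [fh, fw, C, dm]."""
  _, h, w, c = x.shape
  fh, fw, _, dm = f.shape
  out = np.zeros((1, h - fh + 1, w - fw + 1, c * dm))
  for i in range(h - fh + 1):
    for j in range(w - fw + 1):
      patch = x[0, i:i + fh, j:j + fw, :]  # [fh, fw, c] window
      for k in range(c):
        for q in range(dm):
          out[0, i, j, k * dm + q] = np.sum(patch[:, :, k] * f[:, :, k, q])
  return out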
class DepthwiseConv2DTest(tf.test.TestCase):
# This is testing against the output of the implementation using the
# combination of conv_2d and slicing ops.
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
use_gpu):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
conv_native = tf.nn.depthwise_conv2d_native(
t1,
t2,
strides=[1, stride, stride, 1],
padding=padding)
conv_gold = tf.nn.depthwise_conv2d(t1,
t2,
strides=[1, stride, stride, 1],
padding=padding)
native_result = sess.run(conv_native)
gold_result = sess.run(conv_gold)
self.assertArrayNear(np.ravel(native_result), np.ravel(gold_result), 1e-5)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_gold)
def testDepthwiseConv2D(self):
for _, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
self._VerifyValues(input_size, filter_size, stride, padding, use_gpu=True)
self._VerifyValues(input_size,
filter_size,
stride,
padding,
use_gpu=False)
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
t1 = tf.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = tf.constant(x2, shape=filter_in_sizes)
conv = tf.nn.depthwise_conv2d_native(t1,
t2,
strides=[1, stride, stride, 1],
padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
if __name__ == "__main__":
tf.test.main()
|
baoboa/pizza-lammps
|
refs/heads/master
|
src/cfg.py
|
54
|
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# cfg tool
oneline = "Convert LAMMPS snapshots to AtomEye CFG format"
docstr = """
c = cfg(d) d = object containing atom coords (dump, data)
c.one() write all snapshots to tmp.cfg
c.one("new") write all snapshots to new.cfg
c.many() write snapshots to tmp0000.cfg, tmp0001.cfg, etc
c.many("new") write snapshots to new0000.cfg, new0001.cfg, etc
c.single(N) write snapshot for timestep N to tmp.cfg
c.single(N,"file") write snapshot for timestep N to file.cfg
"""
# History
# 11/06, Aidan Thompson (SNL): original version
# ToDo list
# should decide if dump is scaled or not, since CFG prints in scaled coords
# this creates a simple AtomEye CFG format
#   there is a more complex format we could write out,
#   which allows for extra atom info, e.g. values to base atom coloring on
# how to dump for a triclinic box, since AtomEye accepts this
# Variables
# data = data file to read from
# Imports and external programs
import sys
# Class definition
class cfg:
# --------------------------------------------------------------------
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def one(self,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
f = open(file,"w")
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
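      # H0 is the 3x3 simulation box matrix of the CFG header; for an
      # orthogonal LAMMPS box only the diagonal entries are non-zero.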
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
n += 1
f.close()
print "\nwrote %d snapshots to %s in CFG format" % (n,file)
# --------------------------------------------------------------------
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
      file = "%s%04d.cfg" % (root, n)    # zero-pad the snapshot index
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
f.close()
n += 1
print "\nwrote %s snapshots in CFG format" % n
# --------------------------------------------------------------------
def single(self,time,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
which = self.data.findtime(time)
time,box,atoms,bonds,tris,lines = self.data.viz(which)
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
f.close()
|
kazcw/bitcoin
|
refs/heads/master
|
test/functional/rpc_fundrawtransaction.py
|
8
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value, tests will
        # fail; a negative delta always fails the tests.
        # The size of the signature of every input may be at most 2 bytes
        # larger than a minimum sized signature, hence a tolerance of
        # 2 bytes * minRelayTxFeePerByte.
        feeTolerance = 2 * min_relay_tx_fee / 1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        ################################################################
        # test a fundrawtransaction which will not get a change output #
        ################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
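        # splice in a 1-byte scriptSig (a single 0x00 byte) in place of the
        # empty one, matching the "00" hex asserted below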
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.stop_nodes()
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
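        # raw hex decodes as: version 01000000 | 0 vins | 1 vout with value 0
        # and a 6-byte script (6a 04 74657374 = OP_RETURN PUSH4 'test') |
        # locktime 0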
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
|
anisku11/sublimeku
|
refs/heads/master
|
Packages/pygments/all/pygments/lexers/smalltalk.py
|
47
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
.. versionadded:: 0.10
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak', 'st']
mimetypes = ['text/x-smalltalk']
tokens = {
'root': [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition': [
            # Not perfect: can't allow whitespace at the beginning and the
            # end without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables': [
include('whitespaces'),
(r'(:)(\s*)(\w+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
default('#pop'), # else pop
],
'literals': [
(r"'(''|[^'])*'", String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper': [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
# literals
(r"'(''|[^'])*'", String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth': [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root', 'afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces': [
# skip whitespace and comments
(r'\s+', Text),
(r'"(""|[^"])*"', Comment),
],
'objects': [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject': [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout': [
# Squeak fileout format (optional)
(r'^"(""|[^"])*"!', Keyword),
(r"^'(''|[^'])*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class NewspeakLexer(RegexLexer):
"""
    For `Newspeak <http://newspeaklanguage.org/>`_ syntax.
.. versionadded:: 1.1
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root': [
(r'\b(Newsqueak2)\b', Keyword.Declaration),
(r"'[^']*'", String),
(r'\b(class)(\s+)(\w+)(\s*)',
bygroups(Keyword.Declaration, Text, Name.Class, Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
(r'<\w+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+', Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace': [
(r'\s+', Text),
(r'"[^"]*"', Comment)
],
}
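# A minimal usage sketch for the lexers above (hedged: the sample snippet is
# illustrative; `highlight` and `TerminalFormatter` are pygments' documented
# public API):
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = "Transcript showString: 'Hello, world'."
    # Tokenise the Smalltalk snippet with SmalltalkLexer and render it with
    # ANSI colours for the terminal.
    print(highlight(code, SmalltalkLexer(), TerminalFormatter()))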
|
sonata-nfv/son-cli
|
refs/heads/master
|
setup.py
|
5
|
# Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
import codecs
import os.path as path
# buildout build system
# http://www.buildout.org/en/latest/docs/tutorial.html
# setup() documentation:
# http://python-packaging-user-guide.readthedocs.org/en/
# latest/distributing/#setup-py
cwd = path.dirname(__file__)
with codecs.open(path.join(cwd, 'README.md'), 'r', 'utf-8') as f:
    longdesc = f.read()
name = 'sonata-cli'
setup(
name=name,
license='Apache License, Version 2.0',
version='3.0',
url='https://github.com/sonata-nfv/son-cli',
author_email='sonata-dev@sonata-nfv.eu',
long_description=longdesc,
package_dir={'': 'src'},
packages=find_packages('src'), # dependency resolution
namespace_packages=['son', ],
include_package_data=True,
    package_data={
'son': ['schema/tests/son-schema/*', 'workspace/samples/*',
'monitor/docker_compose_files/*', 'monitor/grafana/*',
'monitor/prometheus/*', 'monitor/*.exp',
'validate/eventcfg.yml']
},
# in jenkins, the last package in the list is installed first
install_requires=['setuptools', 'pyaml', 'jsonschema', 'validators',
'requests>2.4.2', 'coloredlogs<=5.1.1', 'paramiko',
'termcolor', 'tabulate', 'networkx<=1.12', 'Flask',
'PyJWT>=1.4.2', 'docker==2.0.2', 'scipy', 'numpy',
'watchdog', 'Flask-Cors', 'flask_cache', 'redis',
'pycrypto', 'matplotlib', 'prometheus_client',
'requests-toolbelt==0.8.0'],
zip_safe=False,
entry_points={
'console_scripts': [
'son-workspace=son.workspace.workspace:main',
'son-package=son.package.package:main',
'son-monitor=son.monitor.monitor:main',
'son-profile=son.profile.profile:main',
'son-validate=son.validate.validate:main',
'son-validate-api=son.validate.api.api:main',
'son-access=son.access.access:main'
],
},
test_suite='son',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
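# A minimal sketch of how one console_scripts entry above resolves after an
# install (hedged: illustrative only, kept as a comment so nothing runs at
# build time; `son-workspace` is one of the entries declared above):
#
#     import pkg_resources
#     ep = pkg_resources.get_entry_info(
#         'sonata-cli', 'console_scripts', 'son-workspace')
#     main = ep.load()  # -> son.workspace.workspace.main
#     main()            # equivalent to running `son-workspace` from a shell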
|
TerryHowe/ansible-modules-hashivault
|
refs/heads/master
|
ansible/modules/hashivault/hashivault_approle_role_secret_list.py
|
1
|
#!/usr/bin/env python
from hvac.exceptions import InvalidPath
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_secret_list
version_added: "3.8.0"
short_description: Hashicorp Vault approle role secret id list module
description:
    - Module to list approle role secret ids from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_secret_list:
name: 'ashley'
register: 'vault_approle_role_secret_list'
- debug: msg="Role secrets are {{vault_approle_role_secret_list.secrets}}"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
module = hashivault_init(argspec)
result = hashivault_approle_role_secret_list(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_approle_role_secret_list(params):
name = params.get('name')
mount_point = params.get('mount_point')
client = hashivault_auth_client(params)
try:
secrets = client.list_role_secrets(name, mount_point=mount_point)
except InvalidPath:
return {'secrets': []}
secrets = secrets.get('data', {}).get('keys', [])
return {'secrets': str(secrets)}
if __name__ == '__main__':
main()
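# A minimal direct-hvac sketch of the call this module wraps (hedged: the
# Vault URL and token are placeholders; `list_role_secrets` is the same hvac
# client method used above, kept as a comment so importing this module stays
# side-effect free):
#
#     import hvac
#     client = hvac.Client(url='https://vault.example.com:8200', token='...')
#     try:
#         result = client.list_role_secrets('ashley', mount_point='approle')
#         print(result.get('data', {}).get('keys', []))
#     except InvalidPath:
#         print([])  # the role exists but has no secret ids yet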
|
you21979/phantomjs
|
refs/heads/2.0
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/update.py
|
124
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class Update(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
Options.update,
Options.quiet,
]
def run(self, state):
if not self._options.update:
return
_log.info("Updating working directory")
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
|
matrix-org/synapse
|
refs/heads/master
|
tests/rest/admin/test_user.py
|
1
|
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import json
import urllib.parse
from binascii import unhexlify
from typing import List, Optional
from unittest.mock import Mock, patch
import synapse.rest.admin
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
from synapse.rest.client.v1 import login, logout, profile, room
from synapse.rest.client.v2_alpha import devices, sync
from synapse.types import JsonDict, UserID
from tests import unittest
from tests.server import FakeSite, make_request
from tests.test_utils import make_awaitable
from tests.unittest import override_config
class UserRegisterTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
profile.register_servlets,
]
def make_homeserver(self, reactor, clock):
self.url = "/_synapse/admin/v1/register"
self.registration_handler = Mock()
self.identity_handler = Mock()
self.login_handler = Mock()
self.device_handler = Mock()
self.device_handler.check_device_registered = Mock(return_value="FAKE")
self.datastore = Mock(return_value=Mock())
self.datastore.get_current_state_deltas = Mock(return_value=(0, []))
self.hs = self.setup_test_homeserver()
self.hs.config.registration_shared_secret = "shared"
self.hs.get_media_repository = Mock()
self.hs.get_deactivate_account_handler = Mock()
return self.hs
def test_disabled(self):
"""
If there is no shared secret, registration through this method will be
prevented.
"""
self.hs.config.registration_shared_secret = None
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(
"Shared secret registration is not enabled", channel.json_body["error"]
)
def test_get_nonce(self):
"""
Calling GET on the endpoint will return a randomised nonce, using the
homeserver's secrets provider.
"""
with patch("secrets.token_hex") as token_hex:
# Patch secrets.token_hex for the duration of this context
token_hex.return_value = "abcd"
channel = self.make_request("GET", self.url)
self.assertEqual(channel.json_body, {"nonce": "abcd"})
def test_expired_nonce(self):
"""
Calling GET on the endpoint will return a randomised nonce, which will
only last for SALT_TIMEOUT (60s).
"""
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
# 59 seconds
self.reactor.advance(59)
body = json.dumps({"nonce": nonce})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("username must be specified", channel.json_body["error"])
# 61 seconds
self.reactor.advance(2)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("unrecognised nonce", channel.json_body["error"])
def test_register_incorrect_nonce(self):
"""
Only the provided nonce can be used, as it's checked in the MAC.
"""
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(b"notthenonce\x00bob\x00abc123\x00admin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("HMAC incorrect", channel.json_body["error"])
def test_register_correct_nonce(self):
"""
When the correct nonce is provided, and the right key is provided, the
user is registered.
"""
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(
nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin\x00support"
)
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"user_type": UserTypes.SUPPORT,
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
def test_nonce_reuse(self):
"""
        A nonce can only be used once; a second use is rejected as unrecognised.
"""
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
# Now, try and reuse it
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("unrecognised nonce", channel.json_body["error"])
def test_missing_parts(self):
"""
Synapse will complain if you don't give nonce, username, password, and
        mac. Admin and user_type are optional. Additional checks are done for length
and type.
"""
def nonce():
channel = self.make_request("GET", self.url)
return channel.json_body["nonce"]
#
# Nonce check
#
# Must be present
body = json.dumps({})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("nonce must be specified", channel.json_body["error"])
#
# Username checks
#
# Must be present
body = json.dumps({"nonce": nonce()})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("username must be specified", channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": 1234})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid username", channel.json_body["error"])
# Must not have null bytes
body = json.dumps({"nonce": nonce(), "username": "abcd\u0000"})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid username", channel.json_body["error"])
        # Must not be too long
body = json.dumps({"nonce": nonce(), "username": "a" * 1000})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid username", channel.json_body["error"])
#
# Password checks
#
# Must be present
body = json.dumps({"nonce": nonce(), "username": "a"})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("password must be specified", channel.json_body["error"])
# Must be a string
body = json.dumps({"nonce": nonce(), "username": "a", "password": 1234})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid password", channel.json_body["error"])
# Must not have null bytes
body = json.dumps({"nonce": nonce(), "username": "a", "password": "abcd\u0000"})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid password", channel.json_body["error"])
# Super long
body = json.dumps({"nonce": nonce(), "username": "a", "password": "A" * 1000})
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid password", channel.json_body["error"])
#
# user_type check
#
# Invalid user_type
body = json.dumps(
{
"nonce": nonce(),
"username": "a",
"password": "1234",
"user_type": "invalid",
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Invalid user type", channel.json_body["error"])
def test_displayname(self):
"""
        Test that the displayname of a new user is set
"""
# set no displayname
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode("ascii") + b"\x00bob1\x00abc123\x00notadmin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{"nonce": nonce, "username": "bob1", "password": "abc123", "mac": want_mac}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob1:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob1:test/displayname")
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("bob1", channel.json_body["displayname"])
# displayname is None
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode("ascii") + b"\x00bob2\x00abc123\x00notadmin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob2",
"displayname": None,
"password": "abc123",
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob2:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob2:test/displayname")
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("bob2", channel.json_body["displayname"])
# displayname is empty
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode("ascii") + b"\x00bob3\x00abc123\x00notadmin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob3",
"displayname": "",
"password": "abc123",
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob3:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob3:test/displayname")
self.assertEqual(404, int(channel.result["code"]), msg=channel.result["body"])
# set displayname
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(nonce.encode("ascii") + b"\x00bob4\x00abc123\x00notadmin")
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob4",
"displayname": "Bob's Name",
"password": "abc123",
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob4:test", channel.json_body["user_id"])
channel = self.make_request("GET", "/profile/@bob4:test/displayname")
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("Bob's Name", channel.json_body["displayname"])
@override_config(
{"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
)
def test_register_mau_limit_reached(self):
"""
Check we can register a user via the shared secret registration API
even if the MAU limit is reached.
"""
handler = self.hs.get_registration_handler()
store = self.hs.get_datastore()
# Set monthly active users to the limit
store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.hs.config.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
self.get_failure(
handler.register_user(localpart="local_part"), ResourceLimitError
)
# Register new user with admin API
channel = self.make_request("GET", self.url)
nonce = channel.json_body["nonce"]
want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
want_mac.update(
nonce.encode("ascii") + b"\x00bob\x00abc123\x00admin\x00support"
)
want_mac = want_mac.hexdigest()
body = json.dumps(
{
"nonce": nonce,
"username": "bob",
"password": "abc123",
"admin": True,
"user_type": UserTypes.SUPPORT,
"mac": want_mac,
}
)
channel = self.make_request("POST", self.url, body.encode("utf8"))
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["user_id"])
class UsersListTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
url = "/_synapse/admin/v2/users"
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
def test_no_auth(self):
"""
Try to list users without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
self._create_users(1)
other_user_token = self.login("user1", "pass1")
channel = self.make_request("GET", self.url, access_token=other_user_token)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_all_users(self):
"""
List all users, including deactivated users.
"""
self._create_users(2)
channel = self.make_request(
"GET",
self.url + "?deactivated=true",
b"{}",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(3, len(channel.json_body["users"]))
self.assertEqual(3, channel.json_body["total"])
# Check that all fields are available
self._check_fields(channel.json_body["users"])
def test_search_term(self):
"""Test that searching for a users works correctly"""
def _search_test(
expected_user_id: Optional[str],
search_term: str,
search_field: Optional[str] = "name",
expected_http_code: Optional[int] = 200,
):
"""Search for a user and check that the returned user's id is a match
Args:
expected_user_id: The user_id expected to be returned by the API. Set
to None to expect zero results for the search
search_term: The term to search for user names with
                search_field: Field to search by: `name` or `user_id`
expected_http_code: The expected http code for the request
"""
url = self.url + "?%s=%s" % (
search_field,
search_term,
)
channel = self.make_request(
"GET",
url.encode("ascii"),
access_token=self.admin_user_tok,
)
self.assertEqual(expected_http_code, channel.code, msg=channel.json_body)
if expected_http_code != 200:
return
# Check that users were returned
self.assertTrue("users" in channel.json_body)
self._check_fields(channel.json_body["users"])
users = channel.json_body["users"]
# Check that the expected number of users were returned
expected_user_count = 1 if expected_user_id else 0
self.assertEqual(len(users), expected_user_count)
self.assertEqual(channel.json_body["total"], expected_user_count)
if expected_user_id:
# Check that the first returned user id is correct
u = users[0]
self.assertEqual(expected_user_id, u["name"])
self._create_users(2)
user1 = "@user1:test"
user2 = "@user2:test"
# Perform search tests
_search_test(user1, "er1")
_search_test(user1, "me 1")
_search_test(user2, "er2")
_search_test(user2, "me 2")
_search_test(user1, "er1", "user_id")
_search_test(user2, "er2", "user_id")
# Test case insensitive
_search_test(user1, "ER1")
_search_test(user1, "NAME 1")
_search_test(user2, "ER2")
_search_test(user2, "NAME 2")
_search_test(user1, "ER1", "user_id")
_search_test(user2, "ER2", "user_id")
_search_test(None, "foo")
_search_test(None, "bar")
_search_test(None, "foo", "user_id")
_search_test(None, "bar", "user_id")
def test_invalid_parameter(self):
"""
If parameters are invalid, an error is returned.
"""
# negative limit
channel = self.make_request(
"GET",
self.url + "?limit=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid guests
channel = self.make_request(
"GET",
self.url + "?guests=not_bool",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# invalid deactivated
channel = self.make_request(
"GET",
self.url + "?deactivated=not_bool",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
        # unknown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# invalid search order
channel = self.make_request(
"GET",
self.url + "?dir=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
def test_limit(self):
"""
Testing list of users with limit
"""
number_users = 20
# Create one less user (since there's already an admin user).
self._create_users(number_users - 1)
channel = self.make_request(
"GET",
self.url + "?limit=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 5)
self.assertEqual(channel.json_body["next_token"], "5")
self._check_fields(channel.json_body["users"])
def test_from(self):
"""
Testing list of users with a defined starting point (from)
"""
number_users = 20
# Create one less user (since there's already an admin user).
self._create_users(number_users - 1)
channel = self.make_request(
"GET",
self.url + "?from=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 15)
self.assertNotIn("next_token", channel.json_body)
self._check_fields(channel.json_body["users"])
def test_limit_and_from(self):
"""
Testing list of users with a defined starting point and limit
"""
number_users = 20
# Create one less user (since there's already an admin user).
self._create_users(number_users - 1)
channel = self.make_request(
"GET",
self.url + "?from=5&limit=10",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(channel.json_body["next_token"], "15")
self.assertEqual(len(channel.json_body["users"]), 10)
self._check_fields(channel.json_body["users"])
def test_next_token(self):
"""
Testing that `next_token` appears at the right place
"""
number_users = 20
# Create one less user (since there's already an admin user).
self._create_users(number_users - 1)
# `next_token` does not appear
# Number of results is the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=20",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does not appear
# Number of max results is larger than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=21",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), number_users)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does appear
# Number of max results is smaller than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 19)
self.assertEqual(channel.json_body["next_token"], "19")
        # Check that setting `from` to the value of `next_token` requests the
        # remaining entries; `next_token` no longer appears
channel = self.make_request(
"GET",
self.url + "?from=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_users)
self.assertEqual(len(channel.json_body["users"]), 1)
self.assertNotIn("next_token", channel.json_body)
def test_order_by(self):
"""
Testing order list with parameter `order_by`
"""
user1 = self.register_user("user1", "pass1", admin=False, displayname="Name Z")
user2 = self.register_user("user2", "pass2", admin=False, displayname="Name Y")
# Modify user
self.get_success(self.store.set_user_deactivated_status(user1, True))
self.get_success(self.store.set_shadow_banned(UserID.from_string(user1), True))
        # Set an avatar URL for all users so that no user has a NULL value,
        # to avoid a different sort order between SQLite and PostgreSQL
self.get_success(self.store.set_profile_avatar_url("user1", "mxc://url3"))
self.get_success(self.store.set_profile_avatar_url("user2", "mxc://url2"))
self.get_success(self.store.set_profile_avatar_url("admin", "mxc://url1"))
# order by default (name)
self._order_test([self.admin_user, user1, user2], None)
self._order_test([self.admin_user, user1, user2], None, "f")
self._order_test([user2, user1, self.admin_user], None, "b")
# order by name
self._order_test([self.admin_user, user1, user2], "name")
self._order_test([self.admin_user, user1, user2], "name", "f")
self._order_test([user2, user1, self.admin_user], "name", "b")
# order by displayname
self._order_test([user2, user1, self.admin_user], "displayname")
self._order_test([user2, user1, self.admin_user], "displayname", "f")
self._order_test([self.admin_user, user1, user2], "displayname", "b")
# order by is_guest
# like sort by ascending name, as no guest user here
self._order_test([self.admin_user, user1, user2], "is_guest")
self._order_test([self.admin_user, user1, user2], "is_guest", "f")
self._order_test([self.admin_user, user1, user2], "is_guest", "b")
# order by admin
self._order_test([user1, user2, self.admin_user], "admin")
self._order_test([user1, user2, self.admin_user], "admin", "f")
self._order_test([self.admin_user, user1, user2], "admin", "b")
# order by deactivated
self._order_test([self.admin_user, user2, user1], "deactivated")
self._order_test([self.admin_user, user2, user1], "deactivated", "f")
self._order_test([user1, self.admin_user, user2], "deactivated", "b")
# order by user_type
# like sort by ascending name, as no special user type here
self._order_test([self.admin_user, user1, user2], "user_type")
self._order_test([self.admin_user, user1, user2], "user_type", "f")
self._order_test([self.admin_user, user1, user2], "is_guest", "b")
# order by shadow_banned
self._order_test([self.admin_user, user2, user1], "shadow_banned")
self._order_test([self.admin_user, user2, user1], "shadow_banned", "f")
self._order_test([user1, self.admin_user, user2], "shadow_banned", "b")
# order by avatar_url
self._order_test([self.admin_user, user2, user1], "avatar_url")
self._order_test([self.admin_user, user2, user1], "avatar_url", "f")
self._order_test([user1, user2, self.admin_user], "avatar_url", "b")
def _order_test(
self,
expected_user_list: List[str],
order_by: Optional[str],
dir: Optional[str] = None,
):
"""Request the list of users in a certain order. Assert that order is what
we expect
Args:
expected_user_list: The list of user_id in the order we expect to get
back from the server
order_by: The type of ordering to give the server
dir: The direction of ordering to give the server
"""
url = self.url + "?deactivated=true&"
if order_by is not None:
url += "order_by=%s&" % (order_by,)
if dir is not None and dir in ("b", "f"):
url += "dir=%s" % (dir,)
channel = self.make_request(
"GET",
url.encode("ascii"),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_user_list))
returned_order = [row["name"] for row in channel.json_body["users"]]
self.assertEqual(expected_user_list, returned_order)
self._check_fields(channel.json_body["users"])
def _check_fields(self, content: JsonDict):
"""Checks that the expected user attributes are present in content
Args:
            content: List of user dicts to check
"""
for u in content:
self.assertIn("name", u)
self.assertIn("is_guest", u)
self.assertIn("admin", u)
self.assertIn("user_type", u)
self.assertIn("deactivated", u)
self.assertIn("shadow_banned", u)
self.assertIn("displayname", u)
self.assertIn("avatar_url", u)
def _create_users(self, number_users: int):
"""
Create a number of users
Args:
number_users: Number of users to be created
"""
for i in range(1, number_users + 1):
self.register_user(
"user%d" % i,
"pass%d" % i,
admin=False,
displayname="Name %d" % i,
)
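# A minimal sketch of the `next_token` pagination contract exercised by
# test_next_token above (hedged: `fetch_page` is a hypothetical callable that
# GETs /_synapse/admin/v2/users with the given query params and returns the
# decoded JSON body; it is not part of this test suite):
def _walk_user_pages(fetch_page, limit: int = 5) -> List[JsonDict]:
    users: List[JsonDict] = []
    params = {"limit": limit}
    while True:
        # e.g. {"users": [...], "total": 20, "next_token": "5"}
        body = fetch_page(params)
        users.extend(body["users"])
        if "next_token" not in body:
            # The last page omits `next_token`: nothing left to fetch.
            return users
        # Resume from where the previous page stopped.
        params["from"] = body["next_token"]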
class DeactivateAccountTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass", displayname="User1")
self.other_user_token = self.login("user", "pass")
self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(
self.other_user
)
self.url = "/_synapse/admin/v1/deactivate/%s" % urllib.parse.quote(
self.other_user
)
# set attributes for user
self.get_success(
self.store.set_profile_avatar_url("user", "mxc://servername/mediaid")
)
self.get_success(
self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0)
)
def test_no_auth(self):
"""
Try to deactivate users without authentication.
"""
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
url = "/_synapse/admin/v1/deactivate/@bob:test"
channel = self.make_request("POST", url, access_token=self.other_user_token)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
"POST",
url,
access_token=self.other_user_token,
content=b"{}",
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self):
"""
Tests that deactivation for a user that does not exist returns a 404
"""
channel = self.make_request(
"POST",
"/_synapse/admin/v1/deactivate/@unknown_person:test",
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_erase_is_not_bool(self):
"""
If parameter `erase` is not boolean, return an error
"""
body = json.dumps({"erase": "False"})
channel = self.make_request(
"POST",
self.url,
content=body.encode(encoding="utf_8"),
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that deactivation for a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/deactivate/@unknown_person:unknown_domain"
channel = self.make_request("POST", url, access_token=self.admin_user_tok)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only deactivate local users", channel.json_body["error"])
def test_deactivate_user_erase_true(self):
"""
        Test deactivating a user with `erase` set to `true`
"""
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User1", channel.json_body["displayname"])
# Deactivate user
body = json.dumps({"erase": True})
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertIsNone(channel.json_body["avatar_url"])
self.assertIsNone(channel.json_body["displayname"])
self._is_erased("@user:test", True)
def test_deactivate_user_erase_false(self):
"""
        Test deactivating a user with `erase` set to `false`
"""
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(False, channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User1", channel.json_body["displayname"])
# Deactivate user
body = json.dumps({"erase": False})
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(True, channel.json_body["deactivated"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User1", channel.json_body["displayname"])
self._is_erased("@user:test", False)
def _is_erased(self, user_id: str, expect: bool) -> None:
"""Assert that the user is erased or not"""
d = self.store.is_user_erased(user_id)
if expect:
self.assertTrue(self.get_success(d))
else:
self.assertFalse(self.get_success(d))
class UserRestTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.auth_handler = hs.get_auth_handler()
# create users and get access tokens
# regardless of whether password login or SSO is allowed
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.get_success(
self.auth_handler.get_access_token_for_user_id(
self.admin_user, device_id=None, valid_until_ms=None
)
)
self.other_user = self.register_user("user", "pass", displayname="User")
self.other_user_token = self.get_success(
self.auth_handler.get_access_token_for_user_id(
self.other_user, device_id=None, valid_until_ms=None
)
)
self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(
self.other_user
)
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
url = "/_synapse/admin/v2/users/@bob:test"
channel = self.make_request(
"GET",
url,
access_token=self.other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("You are not a server admin", channel.json_body["error"])
channel = self.make_request(
"PUT",
url,
access_token=self.other_user_token,
content=b"{}",
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("You are not a server admin", channel.json_body["error"])
def test_user_does_not_exist(self):
"""
Tests that a lookup for a user that does not exist returns a 404
"""
channel = self.make_request(
"GET",
"/_synapse/admin/v2/users/@unknown_person:test",
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"])
def test_create_server_admin(self):
"""
Check that a new admin user is created successfully.
"""
url = "/_synapse/admin/v2/users/@bob:test"
# Create user (server admin)
body = json.dumps(
{
"password": "abc123",
"admin": True,
"displayname": "Bob's name",
"threepids": [{"medium": "email", "address": "bob@bob.bob"}],
"avatar_url": "mxc://fibble/wibble",
}
)
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertTrue(channel.json_body["admin"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
# Get user
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertTrue(channel.json_body["admin"])
self.assertFalse(channel.json_body["is_guest"])
self.assertFalse(channel.json_body["deactivated"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
def test_create_user(self):
"""
Check that a new regular user is created successfully.
"""
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
body = json.dumps(
{
"password": "abc123",
"admin": False,
"displayname": "Bob's name",
"threepids": [{"medium": "email", "address": "bob@bob.bob"}],
"avatar_url": "mxc://fibble/wibble",
}
)
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertFalse(channel.json_body["admin"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
# Get user
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("Bob's name", channel.json_body["displayname"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
self.assertFalse(channel.json_body["admin"])
self.assertFalse(channel.json_body["is_guest"])
self.assertFalse(channel.json_body["deactivated"])
self.assertFalse(channel.json_body["shadow_banned"])
self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"])
@override_config(
{"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
)
def test_create_user_mau_limit_reached_active_admin(self):
"""
Check that an admin can register a new user via the admin API
even if the MAU limit is reached.
Admin user was active before creating user.
"""
handler = self.hs.get_registration_handler()
# Sync to set admin user to active
# before limit of monthly active users is reached
channel = self.make_request("GET", "/sync", access_token=self.admin_user_tok)
if channel.code != 200:
raise HttpResponseException(
channel.code, channel.result["reason"], channel.result["body"]
)
# Set monthly active users to the limit
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.hs.config.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
self.get_failure(
handler.register_user(localpart="local_part"), ResourceLimitError
)
# Register new user with admin API
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
body = json.dumps({"password": "abc123", "admin": False})
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@override_config(
{"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0}
)
def test_create_user_mau_limit_reached_passive_admin(self):
"""
Check that an admin can register a new user via the admin API
even if the MAU limit is reached.
Admin user was not active before creating user.
"""
handler = self.hs.get_registration_handler()
# Set monthly active users to the limit
self.store.get_monthly_active_count = Mock(
return_value=make_awaitable(self.hs.config.max_mau_value)
)
# Check that the blocking of monthly active users is working as expected
# The registration of a new user fails due to the limit
self.get_failure(
handler.register_user(localpart="local_part"), ResourceLimitError
)
# Register new user with admin API
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
body = json.dumps({"password": "abc123", "admin": False})
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
# Admin user is not blocked by mau anymore
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertFalse(channel.json_body["admin"])
@override_config(
{
"email": {
"enable_notifs": True,
"notif_for_new_users": True,
"notif_from": "test@example.com",
},
"public_baseurl": "https://example.com",
}
)
def test_create_user_email_notif_for_new_users(self):
"""
Check that a new regular user is created successfully and
        gets an email pusher.
"""
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
body = json.dumps(
{
"password": "abc123",
"threepids": [{"medium": "email", "address": "bob@bob.bob"}],
}
)
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
pushers = self.get_success(
self.store.get_pushers_by({"user_name": "@bob:test"})
)
pushers = list(pushers)
self.assertEqual(len(pushers), 1)
self.assertEqual("@bob:test", pushers[0].user_name)
@override_config(
{
"email": {
"enable_notifs": False,
"notif_for_new_users": False,
"notif_from": "test@example.com",
},
"public_baseurl": "https://example.com",
}
)
def test_create_user_email_no_notif_for_new_users(self):
"""
Check that a new regular user is created successfully and
        does not get an email pusher.
"""
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
body = json.dumps(
{
"password": "abc123",
"threepids": [{"medium": "email", "address": "bob@bob.bob"}],
}
)
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
pushers = self.get_success(
self.store.get_pushers_by({"user_name": "@bob:test"})
)
pushers = list(pushers)
self.assertEqual(len(pushers), 0)
def test_set_password(self):
"""
Test setting a new password for another user.
"""
# Change password
body = json.dumps({"password": "hahaha"})
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
def test_set_displayname(self):
"""
Test setting the displayname of another user.
"""
# Modify user
body = json.dumps({"displayname": "foobar"})
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("foobar", channel.json_body["displayname"])
def test_set_threepid(self):
"""
        Test setting a threepid for another user.
"""
# Delete old and add new threepid to user
body = json.dumps(
{"threepids": [{"medium": "email", "address": "bob3@bob.bob"}]}
)
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content=body.encode(encoding="utf_8"),
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][0]["address"])
def test_deactivate_user(self):
"""
Test deactivating another user.
"""
# set attributes for user
self.get_success(
self.store.set_profile_avatar_url("user", "mxc://servername/mediaid")
)
self.get_success(
self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0)
)
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"])
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User", channel.json_body["displayname"])
# Deactivate user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": True},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User", channel.json_body["displayname"])
# the user is deactivated, the threepid will be deleted
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self.assertEqual(0, len(channel.json_body["threepids"]))
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"])
self.assertEqual("User", channel.json_body["displayname"])
@override_config({"user_directory": {"enabled": True, "search_all_users": True}})
def test_change_name_deactivate_user_user_directory(self):
"""
        Test changing profile information of a deactivated user and
        check that the user does not appear in the user directory
"""
# is in user directory
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
        self.assertEqual("User", profile["display_name"])
# Deactivate user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": True},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
# is not in user directory
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
self.assertIsNone(profile)
        # Set a new displayname for the user
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"displayname": "Foobar"},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["deactivated"])
self.assertEqual("Foobar", channel.json_body["displayname"])
# is not in user directory
profile = self.get_success(self.store.get_user_in_directory(self.other_user))
self.assertIsNone(profile)
def test_reactivate_user(self):
"""
Test reactivating another user.
"""
# Deactivate the user.
self._deactivate_user("@user:test")
# Attempt to reactivate the user (without a password).
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
# Reactivate the user.
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNotNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
@override_config({"password_config": {"localdb_enabled": False}})
def test_reactivate_user_localdb_disabled(self):
"""
        Test reactivating another user when the local password database is disabled.
"""
# Deactivate the user.
self._deactivate_user("@user:test")
# Reactivate the user with a password
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
@override_config({"password_config": {"enabled": False}})
def test_reactivate_user_password_disabled(self):
"""
        Test reactivating another user when password authentication is disabled entirely.
"""
# Deactivate the user.
self._deactivate_user("@user:test")
# Reactivate the user with a password
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False, "password": "foo"},
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
# Reactivate the user without a password.
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"deactivated": False},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertFalse(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self._is_erased("@user:test", False)
def test_set_user_as_admin(self):
"""
Test setting the admin flag on a user.
"""
# Set a user as an admin
channel = self.make_request(
"PUT",
self.url_other_user,
access_token=self.admin_user_tok,
content={"admin": True},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
# Get user
channel = self.make_request(
"GET",
self.url_other_user,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@user:test", channel.json_body["name"])
self.assertTrue(channel.json_body["admin"])
def test_accidental_deactivation_prevention(self):
"""
Ensure an account can't accidentally be deactivated by using a str value
for the deactivated body parameter
"""
url = "/_synapse/admin/v2/users/@bob:test"
# Create user
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content={"password": "abc123"},
)
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
# Get user
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
self.assertEqual(0, channel.json_body["deactivated"])
# Change password (and use a str for deactivate instead of a bool)
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
content={"password": "abc123", "deactivated": "false"},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
# Check user is not deactivated
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
# Ensure they're still alive
self.assertEqual(0, channel.json_body["deactivated"])
def _is_erased(self, user_id: str, expect: bool) -> None:
"""Assert that the user is erased or not"""
d = self.store.is_user_erased(user_id)
if expect:
self.assertTrue(self.get_success(d))
else:
self.assertFalse(self.get_success(d))
def _deactivate_user(self, user_id: str) -> None:
"""Deactivate user and set as erased"""
# Deactivate the user.
channel = self.make_request(
"PUT",
"/_synapse/admin/v2/users/%s" % urllib.parse.quote(user_id),
access_token=self.admin_user_tok,
content={"deactivated": True},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertTrue(channel.json_body["deactivated"])
self.assertIsNone(channel.json_body["password_hash"])
self._is_erased(user_id, False)
d = self.store.mark_user_erased(user_id)
self.assertIsNone(self.get_success(d))
self._is_erased(user_id, True)
class UserMembershipRestTestCase(unittest.HomeserverTestCase):
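    """Test for /_synapse/admin/v1/users/<user>/joined_rooms"""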
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v1/users/%s/joined_rooms" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self):
"""
        Try to list rooms of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self):
"""
Tests that a lookup for a user that does not exist returns an empty list
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/joined_rooms"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
def test_user_is_not_local(self):
"""
        Tests that a lookup for a user that is not local and is in no rooms returns an empty list
"""
url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/joined_rooms"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
def test_no_memberships(self):
"""
        Tests that a normal lookup for rooms succeeds
        if the user has no memberships
"""
# Get rooms
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["joined_rooms"]))
def test_get_rooms(self):
"""
        Tests that a normal lookup for rooms succeeds
"""
# Create rooms and join
other_user_tok = self.login("user", "pass")
number_rooms = 5
for _ in range(number_rooms):
self.helper.create_room_as(self.other_user, tok=other_user_tok)
# Get rooms
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_rooms, channel.json_body["total"])
self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"]))
def test_get_rooms_with_nonlocal_user(self):
"""
Tests that a normal lookup for rooms is successful with a non-local user
"""
other_user_tok = self.login("user", "pass")
event_builder_factory = self.hs.get_event_builder_factory()
event_creation_handler = self.hs.get_event_creation_handler()
storage = self.hs.get_storage()
# Create two rooms, one with a local user only and one with both a local
# and remote user.
self.helper.create_room_as(self.other_user, tok=other_user_tok)
local_and_remote_room_id = self.helper.create_room_as(
self.other_user, tok=other_user_tok
)
# Add a remote user to the room.
builder = event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": "m.room.member",
"sender": "@joiner:remote_hs",
"state_key": "@joiner:remote_hs",
"room_id": local_and_remote_room_id,
"content": {"membership": "join"},
},
)
event, context = self.get_success(
event_creation_handler.create_new_client_event(builder)
)
self.get_success(storage.persistence.persist_event(event, context))
# Now get rooms
url = "/_synapse/admin/v1/users/@joiner:remote_hs/joined_rooms"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual([local_and_remote_room_id], channel.json_body["joined_rooms"])
class PushersRestTestCase(unittest.HomeserverTestCase):
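    """Test for /_synapse/admin/v1/users/<user>/pushers"""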
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v1/users/%s/pushers" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self):
"""
        Try to list pushers of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self):
"""
Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/pushers"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that a lookup for a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/pushers"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_get_pushers(self):
"""
        Tests that a normal lookup for pushers succeeds
"""
# Get pushers
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
# Register the pusher
other_user_token = self.login("user", "pass")
user_tuple = self.get_success(
self.store.get_user_by_access_token(other_user_token)
)
token_id = user_tuple.token_id
self.get_success(
self.hs.get_pusherpool().add_pusher(
user_id=self.other_user,
access_token=token_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
device_display_name="pushy push",
pushkey="a@example.com",
lang=None,
data={"url": "https://example.com/_matrix/push/v1/notify"},
)
)
# Get pushers
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
for p in channel.json_body["pushers"]:
self.assertIn("pushkey", p)
self.assertIn("kind", p)
self.assertIn("app_id", p)
self.assertIn("app_display_name", p)
self.assertIn("device_display_name", p)
self.assertIn("profile_tag", p)
self.assertIn("lang", p)
self.assertIn("url", p["data"])
class UserMediaRestTestCase(unittest.HomeserverTestCase):
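    """Test for /_synapse/admin/v1/users/<user>/media"""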
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.media_repo = hs.get_media_repository_resource()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v1/users/%s/media" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self):
"""
        Try to list media of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self):
"""
Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/media"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that a lookup for a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_limit(self):
"""
Testing list of media with limit
"""
number_media = 20
other_user_tok = self.login("user", "pass")
self._create_media_for_user(other_user_tok, number_media)
channel = self.make_request(
"GET",
self.url + "?limit=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 5)
self.assertEqual(channel.json_body["next_token"], 5)
self._check_fields(channel.json_body["media"])
def test_from(self):
"""
Testing list of media with a defined starting point (from)
"""
number_media = 20
other_user_tok = self.login("user", "pass")
self._create_media_for_user(other_user_tok, number_media)
channel = self.make_request(
"GET",
self.url + "?from=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 15)
self.assertNotIn("next_token", channel.json_body)
self._check_fields(channel.json_body["media"])
def test_limit_and_from(self):
"""
Testing list of media with a defined starting point and limit
"""
number_media = 20
other_user_tok = self.login("user", "pass")
self._create_media_for_user(other_user_tok, number_media)
channel = self.make_request(
"GET",
self.url + "?from=5&limit=10",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(channel.json_body["next_token"], 15)
self.assertEqual(len(channel.json_body["media"]), 10)
self._check_fields(channel.json_body["media"])
def test_invalid_parameter(self):
"""
If parameters are invalid, an error is returned.
"""
        # unknown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# invalid search order
channel = self.make_request(
"GET",
self.url + "?dir=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
# negative limit
channel = self.make_request(
"GET",
self.url + "?limit=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_next_token(self):
"""
Testing that `next_token` appears at the right place
"""
number_media = 20
other_user_tok = self.login("user", "pass")
self._create_media_for_user(other_user_tok, number_media)
# `next_token` does not appear
# Number of results is the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=20",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), number_media)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does not appear
# Number of max results is larger than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=21",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), number_media)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does appear
# Number of max results is smaller than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 19)
self.assertEqual(channel.json_body["next_token"], 19)
        # Set `from` to the value of `next_token` to request the remaining entries
        # `next_token` does not appear
channel = self.make_request(
"GET",
self.url + "?from=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(channel.json_body["total"], number_media)
self.assertEqual(len(channel.json_body["media"]), 1)
self.assertNotIn("next_token", channel.json_body)
def test_user_has_no_media(self):
"""
        Tests that a normal lookup for media succeeds
        if the user has created no media
"""
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["media"]))
def test_get_media(self):
"""
        Tests that a normal lookup for media succeeds
"""
number_media = 5
other_user_tok = self.login("user", "pass")
self._create_media_for_user(other_user_tok, number_media)
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_media, channel.json_body["total"])
self.assertEqual(number_media, len(channel.json_body["media"]))
self.assertNotIn("next_token", channel.json_body)
self._check_fields(channel.json_body["media"])
def test_order_by(self):
"""
        Testing ordering of the list with the parameter `order_by`
"""
other_user_tok = self.login("user", "pass")
# Resolution: 1×1, MIME type: image/png, Extension: png, Size: 67 B
image_data1 = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
# Resolution: 1×1, MIME type: image/gif, Extension: gif, Size: 35 B
image_data2 = unhexlify(
b"47494638376101000100800100000000"
b"ffffff2c00000000010001000002024c"
b"01003b"
)
# Resolution: 1×1, MIME type: image/bmp, Extension: bmp, Size: 54 B
image_data3 = unhexlify(
b"424d3a0000000000000036000000280000000100000001000000"
b"0100180000000000040000000000000000000000000000000000"
b"0000"
)
# create media and make sure they do not have the same timestamp
media1 = self._create_media_and_access(other_user_tok, image_data1, "image.png")
self.pump(1.0)
media2 = self._create_media_and_access(other_user_tok, image_data2, "image.gif")
self.pump(1.0)
media3 = self._create_media_and_access(other_user_tok, image_data3, "image.bmp")
self.pump(1.0)
# Mark one media as safe from quarantine.
self.get_success(self.store.mark_local_media_as_safe(media2))
# Quarantine one media
self.get_success(
self.store.quarantine_media_by_id("test", media3, self.admin_user)
)
# order by default ("created_ts")
# default is backwards
self._order_test([media3, media2, media1], None)
self._order_test([media1, media2, media3], None, "f")
self._order_test([media3, media2, media1], None, "b")
# sort by media_id
sorted_media = sorted([media1, media2, media3], reverse=False)
sorted_media_reverse = sorted(sorted_media, reverse=True)
# order by media_id
self._order_test(sorted_media, "media_id")
self._order_test(sorted_media, "media_id", "f")
self._order_test(sorted_media_reverse, "media_id", "b")
# order by upload_name
self._order_test([media3, media2, media1], "upload_name")
self._order_test([media3, media2, media1], "upload_name", "f")
self._order_test([media1, media2, media3], "upload_name", "b")
# order by media_type
        # result is ordered by media_id
        # because the uploaded media_type is always 'application/json'
self._order_test(sorted_media, "media_type")
self._order_test(sorted_media, "media_type", "f")
self._order_test(sorted_media, "media_type", "b")
# order by media_length
self._order_test([media2, media3, media1], "media_length")
self._order_test([media2, media3, media1], "media_length", "f")
self._order_test([media1, media3, media2], "media_length", "b")
# order by created_ts
self._order_test([media1, media2, media3], "created_ts")
self._order_test([media1, media2, media3], "created_ts", "f")
self._order_test([media3, media2, media1], "created_ts", "b")
# order by last_access_ts
self._order_test([media1, media2, media3], "last_access_ts")
self._order_test([media1, media2, media3], "last_access_ts", "f")
self._order_test([media3, media2, media1], "last_access_ts", "b")
# order by quarantined_by
# one media is in quarantine, others are ordered by media_ids
        # SQLite and PostgreSQL sort NULL differently.
        # If a media is not in quarantine, `quarantined_by` is NULL.
        # SQLite considers NULL to be smaller than any other value;
        # PostgreSQL considers NULL to be larger than any other value.
# self._order_test(sorted([media1, media2]) + [media3], "quarantined_by")
# self._order_test(sorted([media1, media2]) + [media3], "quarantined_by", "f")
# self._order_test([media3] + sorted([media1, media2]), "quarantined_by", "b")
# order by safe_from_quarantine
# one media is safe from quarantine, others are ordered by media_ids
self._order_test(sorted([media1, media3]) + [media2], "safe_from_quarantine")
self._order_test(
sorted([media1, media3]) + [media2], "safe_from_quarantine", "f"
)
self._order_test(
[media2] + sorted([media1, media3]), "safe_from_quarantine", "b"
)
def _create_media_for_user(self, user_token: str, number_media: int):
"""
Create a number of media for a specific user
Args:
user_token: Access token of the user
number_media: Number of media to be created for the user
"""
for _ in range(number_media):
# file size is 67 Byte
image_data = unhexlify(
b"89504e470d0a1a0a0000000d4948445200000001000000010806"
b"0000001f15c4890000000a49444154789c63000100000500010d"
b"0a2db40000000049454e44ae426082"
)
self._create_media_and_access(user_token, image_data)
def _create_media_and_access(
self,
user_token: str,
image_data: bytes,
filename: str = "image1.png",
) -> str:
"""
        Create one media item for a specific user, access it, and return the `media_id`
Args:
user_token: Access token of the user
image_data: binary data of image
filename: The filename of the media to be uploaded
Returns:
The ID of the newly created media.
"""
upload_resource = self.media_repo.children[b"upload"]
download_resource = self.media_repo.children[b"download"]
# Upload some media into the room
response = self.helper.upload_media(
upload_resource, image_data, user_token, filename, expect_code=200
)
# Extract media ID from the response
server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
media_id = server_and_media_id.split("/")[1]
        # Access the media once so that `last_access_ts` gets set
channel = make_request(
self.reactor,
FakeSite(download_resource),
"GET",
server_and_media_id,
shorthand=False,
access_token=user_token,
)
self.assertEqual(
200,
channel.code,
msg=(
"Expected to receive a 200 on accessing media: %s" % server_and_media_id
),
)
return media_id
def _check_fields(self, content: JsonDict):
"""Checks that the expected user attributes are present in content
Args:
content: List that is checked for content
"""
for m in content:
self.assertIn("media_id", m)
self.assertIn("media_type", m)
self.assertIn("media_length", m)
self.assertIn("upload_name", m)
self.assertIn("created_ts", m)
self.assertIn("last_access_ts", m)
self.assertIn("quarantined_by", m)
self.assertIn("safe_from_quarantine", m)
def _order_test(
self,
expected_media_list: List[str],
order_by: Optional[str],
dir: Optional[str] = None,
):
"""Request the list of media in a certain order. Assert that order is what
we expect
Args:
expected_media_list: The list of media_ids in the order we expect to get
back from the server
order_by: The type of ordering to give the server
dir: The direction of ordering to give the server
"""
url = self.url + "?"
if order_by is not None:
url += "order_by=%s&" % (order_by,)
if dir is not None and dir in ("b", "f"):
url += "dir=%s" % (dir,)
channel = self.make_request(
"GET",
url.encode("ascii"),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_media_list))
returned_order = [row["media_id"] for row in channel.json_body["media"]]
self.assertEqual(expected_media_list, returned_order)
self._check_fields(channel.json_body["media"])
class UserTokenRestTestCase(unittest.HomeserverTestCase):
"""Test for /_synapse/admin/v1/users/<user>/login"""
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
sync.register_servlets,
room.register_servlets,
devices.register_servlets,
logout.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.other_user_tok = self.login("user", "pass")
self.url = "/_synapse/admin/v1/users/%s/login" % urllib.parse.quote(
self.other_user
)
def _get_token(self) -> str:
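        """Log in as the target user via the admin API and return the puppet access token."""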
channel = self.make_request(
"POST", self.url, b"{}", access_token=self.admin_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
return channel.json_body["access_token"]
def test_no_auth(self):
"""Try to login as a user without authentication."""
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_not_admin(self):
"""Try to login as a user as a non-admin user."""
channel = self.make_request(
"POST", self.url, b"{}", access_token=self.other_user_tok
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
def test_send_event(self):
"""Test that sending event as a user works."""
# Create a room.
room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)
        # Log in as the user
puppet_token = self._get_token()
# Test that sending works, and generates the event as the right user.
resp = self.helper.send_event(room_id, "com.example.test", tok=puppet_token)
event_id = resp["event_id"]
event = self.get_success(self.store.get_event(event_id))
self.assertEqual(event.sender, self.other_user)
def test_devices(self):
"""Tests that logging in as a user doesn't create a new device for them."""
        # Log in as the user
self._get_token()
# Check that we don't see a new device in our devices list
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# We should only see the one device (from the login in `prepare`)
self.assertEqual(len(channel.json_body["devices"]), 1)
def test_logout(self):
"""Test that calling `/logout` with the token works."""
        # Log in as the user
puppet_token = self._get_token()
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Logout with the puppet token
channel = self.make_request("POST", "logout", b"{}", access_token=puppet_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# The puppet token should no longer work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
# .. but the real user's tokens should still work
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
def test_user_logout_all(self):
"""Tests that the target user calling `/logout/all` does *not* expire
the token.
"""
        # Log in as the user
puppet_token = self._get_token()
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Logout all with the real user token
channel = self.make_request(
"POST", "logout/all", b"{}", access_token=self.other_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# The puppet token should still work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# .. but the real user's tokens shouldn't
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
def test_admin_logout_all(self):
"""Tests that the admin user calling `/logout/all` does expire the
token.
"""
        # Log in as the user
puppet_token = self._get_token()
# Test that we can successfully make a request
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Logout all with the admin user token
channel = self.make_request(
"POST", "logout/all", b"{}", access_token=self.admin_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# The puppet token should no longer work
channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
# .. but the real user's tokens should still work
channel = self.make_request(
"GET", "devices", b"{}", access_token=self.other_user_tok
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
@unittest.override_config(
{
"public_baseurl": "https://example.org/",
"user_consent": {
"version": "1.0",
"policy_name": "My Cool Privacy Policy",
"template_dir": "/",
"require_at_registration": True,
"block_events_error": "You should accept the policy",
},
"form_secret": "123secret",
}
)
def test_consent(self):
"""Test that sending a message is not subject to the privacy policies."""
# Have the admin user accept the terms.
self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0"))
# First, cheekily accept the terms and create a room
self.get_success(self.store.user_set_consent_version(self.other_user, "1.0"))
room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)
self.helper.send_event(room_id, "com.example.test", tok=self.other_user_tok)
# Now unaccept it and check that we can't send an event
self.get_success(self.store.user_set_consent_version(self.other_user, "0.0"))
self.helper.send_event(
room_id, "com.example.test", tok=self.other_user_tok, expect_code=403
)
        # Log in as the user
puppet_token = self._get_token()
# Sending an event on their behalf should work fine
self.helper.send_event(room_id, "com.example.test", tok=puppet_token)
@override_config(
{"limit_usage_by_mau": True, "max_mau_value": 1, "mau_trial_days": 0}
)
def test_mau_limit(self):
# Create a room as the admin user. This will bump the monthly active users to 1.
room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
# Trying to join as the other user should fail due to reaching MAU limit.
self.helper.join(
room_id, user=self.other_user, tok=self.other_user_tok, expect_code=403
)
# Logging in as the other user and joining a room should work, even
# though the MAU limit would stop the user doing so.
puppet_token = self._get_token()
self.helper.join(room_id, user=self.other_user, tok=puppet_token)
class WhoisRestTestCase(unittest.HomeserverTestCase):
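    """Test for /_synapse/admin/v1/whois/<user> and /_matrix/client/r0/admin/whois/<user>"""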
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url1 = "/_synapse/admin/v1/whois/%s" % urllib.parse.quote(self.other_user)
self.url2 = "/_matrix/client/r0/admin/whois/%s" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self):
"""
        Try to get information about a user without authentication.
"""
channel = self.make_request("GET", self.url1, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
channel = self.make_request("GET", self.url2, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
self.register_user("user2", "pass")
other_user2_token = self.login("user2", "pass")
channel = self.make_request(
"GET",
self.url1,
access_token=other_user2_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
channel = self.make_request(
"GET",
self.url2,
access_token=other_user2_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that a lookup for a user that is not local returns a 400
"""
url1 = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain"
url2 = "/_matrix/client/r0/admin/whois/@unknown_person:unknown_domain"
channel = self.make_request(
"GET",
url1,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only whois a local user", channel.json_body["error"])
channel = self.make_request(
"GET",
url2,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only whois a local user", channel.json_body["error"])
def test_get_whois_admin(self):
"""
The lookup should succeed for an admin.
"""
channel = self.make_request(
"GET",
self.url1,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
channel = self.make_request(
"GET",
self.url2,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
def test_get_whois_user(self):
"""
The lookup should succeed for a normal user looking up their own information.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url1,
access_token=other_user_token,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
channel = self.make_request(
"GET",
self.url2,
access_token=other_user_token,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
self.assertIn("devices", channel.json_body)
class ShadowBanRestTestCase(unittest.HomeserverTestCase):
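    """Test for /_synapse/admin/v1/users/<user>/shadow_ban"""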
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v1/users/%s/shadow_ban" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self):
"""
        Try to shadow-ban a user without authentication.
"""
channel = self.make_request("POST", self.url)
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_not_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request("POST", self.url, access_token=other_user_token)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that shadow-banning a user that is not local returns a 400
"""
url = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain"
channel = self.make_request("POST", url, access_token=self.admin_user_tok)
self.assertEqual(400, channel.code, msg=channel.json_body)
def test_success(self):
"""
Shadow-banning should succeed for an admin.
"""
# The user starts off as not shadow-banned.
other_user_token = self.login("user", "pass")
result = self.get_success(self.store.get_user_by_access_token(other_user_token))
self.assertFalse(result.shadow_banned)
channel = self.make_request("POST", self.url, access_token=self.admin_user_tok)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual({}, channel.json_body)
# Ensure the user is shadow-banned (and the cache was cleared).
result = self.get_success(self.store.get_user_by_access_token(other_user_token))
self.assertTrue(result.shadow_banned)
class RateLimitTestCase(unittest.HomeserverTestCase):
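    """Test for /_synapse/admin/v1/users/<user>/override_ratelimit"""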
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = (
"/_synapse/admin/v1/users/%s/override_ratelimit"
% urllib.parse.quote(self.other_user)
)
def test_no_auth(self):
"""
        Try to get, set or delete the ratelimit of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
channel = self.make_request("DELETE", self.url, b"{}")
self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self):
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
channel = self.make_request(
"POST",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
channel = self.make_request(
"DELETE",
self.url,
access_token=other_user_token,
)
self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self):
"""
Tests that a lookup for a user that does not exist returns a 404
"""
url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
channel = self.make_request(
"POST",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self):
"""
        Tests that a lookup for a user that is not local returns a 400
"""
url = (
"/_synapse/admin/v1/users/@unknown_person:unknown_domain/override_ratelimit"
)
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
channel = self.make_request(
"POST",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"Only local users can be ratelimited", channel.json_body["error"]
)
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"Only local users can be ratelimited", channel.json_body["error"]
)
def test_invalid_parameter(self):
"""
If parameters are invalid, an error is returned.
"""
# messages_per_second is a string
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"messages_per_second": "string"},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# messages_per_second is negative
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"messages_per_second": -1},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# burst_count is a string
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"burst_count": "string"},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# burst_count is negative
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"burst_count": -1},
)
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
def test_return_zero_when_null(self):
"""
        If the values in the database are `null`, the API should return an int `0`
"""
self.get_success(
self.store.db_pool.simple_upsert(
table="ratelimit_override",
keyvalues={"user_id": self.other_user},
values={
"messages_per_second": None,
"burst_count": None,
},
)
)
# request status
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(0, channel.json_body["messages_per_second"])
self.assertEqual(0, channel.json_body["burst_count"])
def test_success(self):
"""
Rate-limiting (set/update/delete) should succeed for an admin.
"""
# request status
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
# set ratelimit
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"messages_per_second": 10, "burst_count": 11},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(10, channel.json_body["messages_per_second"])
self.assertEqual(11, channel.json_body["burst_count"])
# update ratelimit
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"messages_per_second": 20, "burst_count": 21},
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(20, channel.json_body["messages_per_second"])
self.assertEqual(21, channel.json_body["burst_count"])
# request status
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(20, channel.json_body["messages_per_second"])
self.assertEqual(21, channel.json_body["burst_count"])
# delete ratelimit
channel = self.make_request(
"DELETE",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
# request status
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertNotIn("messages_per_second", channel.json_body)
self.assertNotIn("burst_count", channel.json_body)
|
bgxavier/nova
|
refs/heads/master
|
nova/image/download/__init__.py
|
61
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import stevedore.driver
import stevedore.extension
from nova.i18n import _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def load_transfer_modules():
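    """Discover registered image download handlers via stevedore and return a
    mapping from URL scheme to the driver that handles it.

    If two modules register the same scheme, the first registration wins and
    an error is logged for the duplicate.
    """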
module_dictionary = {}
ex = stevedore.extension.ExtensionManager('nova.image.download.modules')
for module_name in ex.names():
mgr = stevedore.driver.DriverManager(
namespace='nova.image.download.modules',
name=module_name,
invoke_on_load=False)
schemes_list = mgr.driver.get_schemes()
for scheme in schemes_list:
if scheme in module_dictionary:
LOG.error(_LE('%(scheme)s is registered as a module twice. '
'%(module_name)s is not being used.'),
{'scheme': scheme,
'module_name': module_name})
else:
module_dictionary[scheme] = mgr.driver
return module_dictionary
|
mquandalle/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/testing/gtest/scripts/upload_gtest.py
|
1963
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
--cc=googletestframework@googlegroups.com to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = 'googletestframework@googlegroups.com'
def main():
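  """Builds the upload.py command line, ensuring the Google Test discussion
  group is cc'd, then replaces the current process with upload.py.
  """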
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
loseblue/vim-ycm-windows-64
|
refs/heads/master
|
third_party/requests/requests/packages/chardet/sbcharsetprober.py
|
2926
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
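# Tunable parameters: only the SAMPLE_SIZE most frequent characters of a
# model take part in sequence analysis, and the shortcut thresholds let the
# prober commit to (or rule out) a charset early once enough sequences
# (SB_ENOUGH_REL_THRESHOLD) have been seen.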
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
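        """Feed a chunk of bytes to the prober and return the detection state.

        Counts two-character sequences whose characters both fall within the
        SAMPLE_SIZE most frequent characters of the model, then applies the
        positive/negative shortcut thresholds once enough sequences have
        been seen.
        """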
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a '
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
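# Minimal usage sketch (illustrative, not part of the original module; 'model'
# stands for one of the language-model dicts shipped with chardet):
#
#   prober = SingleByteCharSetProber(model)
#   prober.feed(b'...raw sample bytes...')
#   prober.get_charset_name(), prober.get_confidence()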
|
ethan1341/angular-rc5
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/buildbot/buildbot_run.py
|
1467
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
def GypTestFormat(title, format=None, msvs_version=None, tests=None):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
  if not format:
    format = title
  tests = tests or []
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
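# For example, GypTestFormat('ninja') runs, from ROOT_DIR and with
# GYP_MSVS_VERSION exported when msvs_version is given, roughly:
#   <python> gyp/gyptest.py --all --passed --format ninja --path <ROOT>/cmake/bin --chdir gyp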
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
if sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
|
kustomzone/Rusthon
|
refs/heads/master
|
regtests/dict/contains.py
|
6
|
"""key in dict"""
def main():
a = {'2': 22, 3:33}
TestError( '2' in a )
TestError( 3 in a )
|
slowfranklin/samba
|
refs/heads/master
|
source4/torture/libnet/python/samr-test.py
|
67
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# Copyright (C) Kamen Mazdrashki <kamen.mazdrashki@postpath.com> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Usage:
# export ACCOUNT_NAME=kamen
# export NEW_PASS=test
# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
# PYTHONPATH="$samba4srcdir/torture/libnet/python" $SUBUNITRUN samr-test -Ukma-exch.devel/Administrator%333
#
import os
from samba import net
import samba.tests
if not "ACCOUNT_NAME" in os.environ.keys():
raise Exception("Please supply ACCOUNT_NAME in environment")
if not "NEW_PASS" in os.environ.keys():
raise Exception("Please supply NEW_PASS in environment")
account_name = os.environ["ACCOUNT_NAME"]
new_pass = os.environ["NEW_PASS"]
#
# Tests start here
#
class Libnet_SetPwdTest(samba.tests.TestCase):
########################################################################################
def test_SetPassword(self):
creds = self.get_credentials()
net.SetPassword(account_name=account_name,
domain_name=creds.get_domain(),
newpassword=new_pass,
credentials=creds)
########################################################################################
|
alexmorozov/django
|
refs/heads/master
|
tests/utils_tests/test_module/good_module.py
|
582
|
content = 'Good Module'
|
h3biomed/ansible
|
refs/heads/h3
|
lib/ansible/modules/crypto/openssl_privatekey.py
|
4
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: openssl_privatekey
version_added: "2.3"
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_(cryptosystem)),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
    overridden with the I(select_crypto_backend) option.
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
version_added: "2.8"
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
version_added: "2.4"
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
version_added: "2.4"
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
version_added: "2.8"
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
version_added: "2.8"
extends_documentation_fragment:
- files
seealso:
- module: openssl_certificate
- module: openssl_csr
- module: openssl_dhparam
- module: openssl_pkcs12
- module: openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
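# The following ECC example is an illustrative addition (not from the original
# module docs); any curve supported by the installed cryptography works here.
- name: Generate an OpenSSL private key with the ECC algorithm and a named curve
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    type: ECC
    curve: secp256r1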
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.exceptions
import cryptography.hazmat.backends
import cryptography.hazmat.primitives.serialization
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.asymmetric.dsa
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.utils
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
try:
import cryptography.hazmat.primitives.asymmetric.x25519
CRYPTOGRAPHY_HAS_X25519 = True
try:
cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
CRYPTOGRAPHY_HAS_X25519_FULL = True
except AttributeError:
CRYPTOGRAPHY_HAS_X25519_FULL = False
except ImportError:
CRYPTOGRAPHY_HAS_X25519 = False
CRYPTOGRAPHY_HAS_X25519_FULL = False
try:
import cryptography.hazmat.primitives.asymmetric.x448
CRYPTOGRAPHY_HAS_X448 = True
except ImportError:
CRYPTOGRAPHY_HAS_X448 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed25519
CRYPTOGRAPHY_HAS_ED25519 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED25519 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed448
CRYPTOGRAPHY_HAS_ED448 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED448 = False
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import string_types
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
pass
class PrivateKeyBase(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(PrivateKeyBase, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.size = module.params['size']
self.passphrase = module.params['passphrase']
self.cipher = module.params['cipher']
self.privatekey = None
self.fingerprint = {}
self.backup = module.params['backup']
self.backup_file = None
if module.params['mode'] is None:
module.params['mode'] = '0600'
@abc.abstractmethod
def _generate_private_key_data(self):
pass
@abc.abstractmethod
def _get_fingerprint(self):
pass
def generate(self, module):
"""Generate a keypair."""
if not self.check(module, perms_required=False) or self.force:
if self.backup:
self.backup_file = module.backup_local(self.path)
privatekey_data = self._generate_private_key_data()
crypto_utils.write_file(module, privatekey_data, 0o600)
self.changed = True
self.fingerprint = self._get_fingerprint()
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def remove(self, module):
if self.backup:
self.backup_file = module.backup_local(self.path)
super(PrivateKeyBase, self).remove(module)
@abc.abstractmethod
def _check_passphrase(self):
pass
@abc.abstractmethod
def _check_size_and_type(self):
pass
def check(self, module, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKeyBase, self).check(module, perms_required)
if not state_and_perms or not self._check_passphrase():
return False
return self._check_size_and_type()
def dump(self):
"""Serialize the object into a dictionary."""
result = {
'size': self.size,
'filename': self.path,
'changed': self.changed,
'fingerprint': self.fingerprint,
}
if self.backup_file:
result['backup_file'] = self.backup_file
return result
# Implementation using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
def __init__(self, module):
super(PrivateKeyPyOpenSSL, self).__init__(module)
if module.params['type'] == 'RSA':
self.type = crypto.TYPE_RSA
elif module.params['type'] == 'DSA':
self.type = crypto.TYPE_DSA
else:
module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
def _generate_private_key_data(self):
self.privatekey = crypto.PKey()
try:
self.privatekey.generate_key(self.type, self.size)
except (TypeError, ValueError) as exc:
raise PrivateKeyError(exc)
if self.cipher and self.passphrase:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
self.cipher, to_bytes(self.passphrase))
else:
return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)
def _get_fingerprint(self):
return crypto_utils.get_fingerprint(self.path, self.passphrase)
def _check_passphrase(self):
try:
crypto_utils.load_privatekey(self.path, self.passphrase)
return True
except Exception as dummy:
return False
def _check_size_and_type(self):
def _check_size(privatekey):
return self.size == privatekey.bits()
def _check_type(privatekey):
return self.type == privatekey.type()
try:
privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
except crypto_utils.OpenSSLBadPassphraseError as exc:
raise PrivateKeyError(exc)
return _check_size(privatekey) and _check_type(privatekey)
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyPyOpenSSL, self).dump()
if self.type == crypto.TYPE_RSA:
result['type'] = 'RSA'
else:
result['type'] = 'DSA'
return result
# Implementation using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
def _get_ec_class(self, ectype):
ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
if ecclass is None:
self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
return ecclass
def _add_curve(self, name, ectype, deprecated=False):
def create(size):
ecclass = self._get_ec_class(ectype)
return ecclass()
def verify(privatekey):
ecclass = self._get_ec_class(ectype)
return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
self.curves[name] = {
'create': create,
'verify': verify,
'deprecated': deprecated,
}
def __init__(self, module):
super(PrivateKeyCryptography, self).__init__(module)
self.curves = dict()
self._add_curve('secp384r1', 'SECP384R1')
self._add_curve('secp521r1', 'SECP521R1')
self._add_curve('secp224r1', 'SECP224R1')
self._add_curve('secp192r1', 'SECP192R1')
self._add_curve('secp256r1', 'SECP256R1')
self._add_curve('secp256k1', 'SECP256K1')
self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
self.module = module
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.type = module.params['type']
self.curve = module.params['curve']
if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519')
if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.module.fail_json(msg='Your cryptography version does not support X448')
if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.module.fail_json(msg='Your cryptography version does not support Ed25519')
if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.module.fail_json(msg='Your cryptography version does not support Ed448')
def _generate_private_key_data(self):
format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
try:
if self.type == 'RSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537, # OpenSSL always uses this
key_size=self.size,
backend=self.cryptography_backend
)
if self.type == 'DSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
key_size=self.size,
backend=self.cryptography_backend
)
if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
if self.type == 'ECC' and self.curve in self.curves:
if self.curves[self.curve]['deprecated']:
self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
curve=self.curves[self.curve]['create'](self.size),
backend=self.cryptography_backend
)
        except cryptography.exceptions.UnsupportedAlgorithm as dummy:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
# Select key encryption
encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
if self.cipher and self.passphrase:
if self.cipher == 'auto':
encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
else:
self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
# Serialize key
return self.privatekey.private_bytes(
encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,
format=format,
encryption_algorithm=encryption_algorithm
)
def _load_privatekey(self):
try:
with open(self.path, 'rb') as f:
return cryptography.hazmat.primitives.serialization.load_pem_private_key(
f.read(),
None if self.passphrase is None else to_bytes(self.passphrase),
backend=self.cryptography_backend
)
except Exception as e:
raise PrivateKeyError(e)
def _get_fingerprint(self):
# Get bytes of public key
private_key = self._load_privatekey()
public_key = private_key.public_key()
public_key_bytes = public_key.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
# Get fingerprints of public_key_bytes
return crypto_utils.get_fingerprint_of_bytes(public_key_bytes)
def _check_passphrase(self):
try:
with open(self.path, 'rb') as f:
                cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    f.read(),
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
            return True
except Exception as dummy:
return False
def _check_size_and_type(self):
privatekey = self._load_privatekey()
if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
return self.type == 'RSA' and self.size == privatekey.key_size
if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
return self.type == 'DSA' and self.size == privatekey.key_size
if CRYPTOGRAPHY_HAS_X25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
return self.type == 'X25519'
if CRYPTOGRAPHY_HAS_X448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
return self.type == 'X448'
if CRYPTOGRAPHY_HAS_ED25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
return self.type == 'Ed25519'
if CRYPTOGRAPHY_HAS_ED448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
return self.type == 'Ed448'
if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
if self.type != 'ECC':
return False
if self.curve not in self.curves:
return False
return self.curves[self.curve]['verify'](privatekey)
return False
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyCryptography, self).dump()
result['type'] = self.type
if self.type == 'ECC':
result['curve'] = self.curve
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
size=dict(type='int', default=4096),
type=dict(type='str', default='RSA', choices=[
'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
]),
curve=dict(type='str', choices=[
'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
]),
force=dict(type='bool', default=False),
path=dict(type='path', required=True),
passphrase=dict(type='str', no_log=True),
cipher=dict(type='str'),
backup=dict(type='bool', default=False),
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
),
supports_check_mode=True,
add_file_common_args=True,
required_together=[
['cipher', 'passphrase']
],
required_if=[
['type', 'ECC', ['curve']],
],
)
base_dir = os.path.dirname(module.params['path']) or '.'
if not os.path.isdir(base_dir):
module.fail_json(
name=base_dir,
            msg='The directory %s does not exist or is not a directory' % base_dir
)
backend = module.params['select_crypto_backend']
if backend == 'auto':
        # Detect what is possible
can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
# Decision
if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
# First try pyOpenSSL, then cryptography
if can_use_pyopenssl:
backend = 'pyopenssl'
elif can_use_cryptography:
backend = 'cryptography'
else:
# First try cryptography, then pyOpenSSL
if can_use_cryptography:
backend = 'cryptography'
elif can_use_pyopenssl:
backend = 'pyopenssl'
# Success?
if backend == 'auto':
module.fail_json(msg=("Can't detect any of the required Python libraries "
"cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
MINIMAL_CRYPTOGRAPHY_VERSION,
MINIMAL_PYOPENSSL_VERSION))
try:
if backend == 'pyopenssl':
if not PYOPENSSL_FOUND:
module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
exception=PYOPENSSL_IMP_ERR)
private_key = PrivateKeyPyOpenSSL(module)
elif backend == 'cryptography':
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
private_key = PrivateKeyCryptography(module)
if private_key.state == 'present':
if module.check_mode:
result = private_key.dump()
result['changed'] = module.params['force'] or not private_key.check(module)
module.exit_json(**result)
private_key.generate(module)
else:
if module.check_mode:
result = private_key.dump()
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
private_key.remove(module)
result = private_key.dump()
module.exit_json(**result)
except crypto_utils.OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
main()
|
Transtech/omim
|
refs/heads/master
|
3party/protobuf/python/google/protobuf/service.py
|
242
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
"""Exception raised on failed blocking RPC method call."""
pass
class Service(object):
"""Abstract base interface for protocol-buffer-based RPC services.
Services themselves are abstract classes (implemented either by servers or as
stubs), but they subclass this base interface. The methods of this
interface can be used to call the methods of the service without knowing
its exact type at compile time (analogous to the Message interface).
"""
  def GetDescriptor(self):
"""Retrieves this service's descriptor."""
raise NotImplementedError
def CallMethod(self, method_descriptor, rpc_controller,
request, done):
"""Calls a method of the service specified by method_descriptor.
If "done" is None then the call is blocking and the response
message will be returned directly. Otherwise the call is asynchronous
and "done" will later be called with the response value.
In the blocking case, RpcException will be raised on error.
Preconditions:
    * method_descriptor.service == GetDescriptor()
    * request is of the exact same class as returned by
      GetRequestClass(method).
* After the call has started, the request must not be modified.
* "rpc_controller" is of the correct type for the RPC implementation being
used by this Service. For stubs, the "correct type" depends on the
RpcChannel which the stub is using.
Postconditions:
* "done" will be called when the method is complete. This may be
before CallMethod() returns or it may be at some point in the future.
* If the RPC failed, the response value passed to "done" will be None.
Further details about the failure can be found by querying the
RpcController.
"""
raise NotImplementedError
def GetRequestClass(self, method_descriptor):
"""Returns the class of the request message for the specified method.
CallMethod() requires that the request is of a particular subclass of
Message. GetRequestClass() gets the default instance of this required
type.
Example:
method = service.GetDescriptor().FindMethodByName("Foo")
request = stub.GetRequestClass(method)()
request.ParseFromString(input)
      service.CallMethod(method, rpc_controller, request, callback)
"""
raise NotImplementedError
def GetResponseClass(self, method_descriptor):
"""Returns the class of the response message for the specified method.
This method isn't really needed, as the RpcChannel's CallMethod constructs
the response protocol message. It's provided anyway in case it is useful
for the caller to know the response type in advance.
"""
raise NotImplementedError
class RpcController(object):
"""An RpcController mediates a single method call.
The primary purpose of the controller is to provide a way to manipulate
settings specific to the RPC implementation and to find out about RPC-level
errors. The methods provided by the RpcController interface are intended
to be a "least common denominator" set of features which we expect all
implementations to support. Specific implementations may provide more
advanced features (e.g. deadline propagation).
"""
# Client-side methods below
def Reset(self):
"""Resets the RpcController to its initial state.
After the RpcController has been reset, it may be reused in
a new call. Must not be called while an RPC is in progress.
"""
raise NotImplementedError
def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError
def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError
def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError
# Server-side methods below
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
def IsCanceled(self):
"""Checks if the client cancelled the RPC.
If true, indicates that the client canceled the RPC, so the server may
as well give up on replying to it. The server should still call the
final "done" callback.
"""
raise NotImplementedError
def NotifyOnCancel(self, callback):
"""Sets a callback to invoke on cancel.
Asks that the given callback be called when the RPC is canceled. The
callback will always be called exactly once. If the RPC completes without
being canceled, the callback will be called after completion. If the RPC
has already been canceled when NotifyOnCancel() is called, the callback
will be called immediately.
NotifyOnCancel() must be called no more than once per request.
"""
raise NotImplementedError
class RpcChannel(object):
"""Abstract interface for an RPC channel.
An RpcChannel represents a communication line to a service which can be used
to call that service's methods. The service may be running on another
machine. Normally, you should not use an RpcChannel directly, but instead
  construct a stub Service wrapping it.
  Example:
    channel = rpcImpl.Channel("remotehost.example.com:1234")
    controller = rpcImpl.Controller()
    service = MyService_Stub(channel)
    service.MyMethod(controller, request, callback)
"""
def CallMethod(self, method_descriptor, rpc_controller,
request, response_class, done):
"""Calls the method identified by the descriptor.
Call the given method of the remote service. The signature of this
procedure looks the same as Service.CallMethod(), but the requirements
are less strict in one important way: the request object doesn't have to
be of any specific class as long as its descriptor is method.input_type.
"""
raise NotImplementedError
|
pomegranited/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor/paidcourse_enrollment_report.py
|
38
|
"""
Defines the concrete class for the CyberSource enrollment report.
"""
from courseware.access import has_access
import collections
from django.conf import settings
from django.utils.translation import ugettext as _
from courseware.courses import get_course_by_id
from instructor.enrollment_report import BaseAbstractEnrollmentReportProvider
from microsite_configuration import microsite
from shoppingcart.models import RegistrationCodeRedemption, PaidCourseRegistration, CouponRedemption, OrderItem, \
InvoiceTransaction
from student.models import CourseEnrollment, ManualEnrollmentAudit
class PaidCourseEnrollmentReportProvider(BaseAbstractEnrollmentReportProvider):
"""
The concrete class for all CyberSource Enrollment Reports.
"""
def get_enrollment_info(self, user, course_id):
"""
Returns the User Enrollment information.
"""
course = get_course_by_id(course_id, depth=0)
is_course_staff = bool(has_access(user, 'staff', course))
manual_enrollment_reason = 'N/A'
# check the user enrollment role
if user.is_staff:
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
enrollment_role = _('{platform_name} Staff').format(platform_name=platform_name)
elif is_course_staff:
enrollment_role = _('Course Staff')
else:
enrollment_role = _('Student')
course_enrollment = CourseEnrollment.get_enrollment(user=user, course_key=course_id)
if is_course_staff:
enrollment_source = _('Staff')
else:
# get the registration_code_redemption object if exists
registration_code_redemption = RegistrationCodeRedemption.registration_code_used_for_enrollment(
course_enrollment)
# get the paid_course registration item if exists
paid_course_reg_item = PaidCourseRegistration.get_course_item_for_user_enrollment(
user=user,
course_id=course_id,
course_enrollment=course_enrollment
)
            # determine where the enrollment came from
if registration_code_redemption is not None:
enrollment_source = _('Used Registration Code')
elif paid_course_reg_item is not None:
enrollment_source = _('Credit Card - Individual')
else:
manual_enrollment = ManualEnrollmentAudit.get_manual_enrollment(course_enrollment)
if manual_enrollment is not None:
enrollment_source = _(
'manually enrolled by username: {username}'
).format(username=manual_enrollment.enrolled_by.username)
manual_enrollment_reason = manual_enrollment.reason
else:
enrollment_source = _('Manually Enrolled')
enrollment_date = course_enrollment.created.strftime("%B %d, %Y")
currently_enrolled = course_enrollment.is_active
course_enrollment_data = collections.OrderedDict()
course_enrollment_data['Enrollment Date'] = enrollment_date
course_enrollment_data['Currently Enrolled'] = currently_enrolled
course_enrollment_data['Enrollment Source'] = enrollment_source
course_enrollment_data['Manual (Un)Enrollment Reason'] = manual_enrollment_reason
course_enrollment_data['Enrollment Role'] = enrollment_role
return course_enrollment_data
def get_payment_info(self, user, course_id):
"""
Returns the User Payment information.
"""
course_enrollment = CourseEnrollment.get_enrollment(user=user, course_key=course_id)
paid_course_reg_item = PaidCourseRegistration.get_course_item_for_user_enrollment(
user=user,
course_id=course_id,
course_enrollment=course_enrollment
)
payment_data = collections.OrderedDict()
        # check if the user made a single self-purchase
        # for enrollment in the course.
if paid_course_reg_item is not None:
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(
order_id=paid_course_reg_item.order_id)
coupon_codes = [redemption.coupon.code for redemption in coupon_redemption]
coupon_codes = ", ".join(coupon_codes)
registration_code_used = 'N/A'
list_price = paid_course_reg_item.get_list_price()
payment_amount = paid_course_reg_item.unit_cost
coupon_codes_used = coupon_codes
payment_status = paid_course_reg_item.status
transaction_reference_number = paid_course_reg_item.order_id
else:
# check if the user used a registration code for the enrollment.
registration_code_redemption = RegistrationCodeRedemption.registration_code_used_for_enrollment(
course_enrollment)
if registration_code_redemption is not None:
registration_code = registration_code_redemption.registration_code
registration_code_used = registration_code.code
if registration_code.invoice_item_id:
list_price, payment_amount, payment_status, transaction_reference_number =\
self._get_invoice_data(registration_code_redemption)
coupon_codes_used = 'N/A'
elif registration_code_redemption.registration_code.order_id:
list_price, payment_amount, coupon_codes_used, payment_status, transaction_reference_number = \
self._get_order_data(registration_code_redemption, course_id)
else:
# this happens when the registration code is not created via invoice or bulk purchase
# scenario.
list_price = 'N/A'
payment_amount = 'N/A'
coupon_codes_used = 'N/A'
registration_code_used = 'N/A'
payment_status = _('Data Integrity Error')
transaction_reference_number = 'N/A'
else:
list_price = 'N/A'
payment_amount = 'N/A'
coupon_codes_used = 'N/A'
registration_code_used = 'N/A'
payment_status = _('TBD')
transaction_reference_number = 'N/A'
payment_data['List Price'] = list_price
payment_data['Payment Amount'] = payment_amount
payment_data['Coupon Codes Used'] = coupon_codes_used
payment_data['Registration Code Used'] = registration_code_used
payment_data['Payment Status'] = payment_status
payment_data['Transaction Reference Number'] = transaction_reference_number
return payment_data
def _get_order_data(self, registration_code_redemption, course_id):
"""
Returns the order data
"""
order_item = OrderItem.objects.get(order=registration_code_redemption.registration_code.order,
courseregcodeitem__course_id=course_id)
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(
order_id=registration_code_redemption.registration_code.order)
coupon_codes = [redemption.coupon.code for redemption in coupon_redemption]
coupon_codes = ", ".join(coupon_codes)
list_price = order_item.get_list_price()
payment_amount = order_item.unit_cost
coupon_codes_used = coupon_codes
payment_status = order_item.status
transaction_reference_number = order_item.order_id
return list_price, payment_amount, coupon_codes_used, payment_status, transaction_reference_number
def _get_invoice_data(self, registration_code_redemption):
"""
Returns the Invoice data
"""
registration_code = registration_code_redemption.registration_code
list_price = registration_code.invoice_item.unit_price
total_amount = registration_code_redemption.registration_code.invoice.total_amount
qty = registration_code_redemption.registration_code.invoice_item.qty
payment_amount = total_amount / qty
invoice_transaction = InvoiceTransaction.get_invoice_transaction(
invoice_id=registration_code_redemption.registration_code.invoice.id)
if invoice_transaction is not None:
            # an amount greater than 0 means the invoice has been paid
if invoice_transaction.amount > 0:
payment_status = 'Invoice Paid'
else:
                # otherwise (amount <= 0) the invoice has been refunded
payment_status = 'Refunded'
else:
payment_status = 'Invoice Outstanding'
transaction_reference_number = registration_code_redemption.registration_code.invoice_id
return list_price, payment_amount, payment_status, transaction_reference_number
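# Illustrative usage sketch (assumes the provider is constructible without
# arguments; 'user' and 'course_id' are hypothetical objects):
#
#   provider = PaidCourseEnrollmentReportProvider()
#   enrollment = provider.get_enrollment_info(user, course_id)
#   payment = provider.get_payment_info(user, course_id)
#   row = dict(enrollment, **payment)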
|
salguarnieri/intellij-community
|
refs/heads/master
|
python/testData/formatter/newLineAfterColon.py
|
83
|
if True: pass
|
jnovinger/django
|
refs/heads/master
|
django/contrib/gis/db/models/functions.py
|
209
|
from decimal import Decimal
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import FloatField, IntegerField, TextField
from django.db.models.expressions import Func, Value
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
class GeoFunc(Func):
function = None
output_field_class = None
geom_param_pos = 0
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super(GeoFunc, self).__init__(*expressions, **extra)
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
expr = self.source_expressions[self.geom_param_pos]
if hasattr(expr, 'srid'):
return expr.srid
try:
return expr.field.srid
except (AttributeError, FieldError):
return None
def as_sql(self, compiler, connection):
if self.function is None:
self.function = connection.ops.spatial_function_name(self.name)
return super(GeoFunc, self).as_sql(compiler, connection)
def resolve_expression(self, *args, **kwargs):
res = super(GeoFunc, self).resolve_expression(*args, **kwargs)
base_srid = res.srid
if not base_srid:
raise TypeError("Geometry functions can only operate on geometric content.")
for pos, expr in enumerate(res.source_expressions[1:], start=1):
if isinstance(expr, GeomValue) and expr.srid != base_srid:
# Automatic SRID conversion so objects are comparable
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, str(check_types))
)
return value
class GeomValue(Value):
geography = False
@property
def srid(self):
return self.value.srid
def as_sql(self, compiler, connection):
if self.geography:
self.value = connection.ops.Adapter(self.value, geography=self.geography)
else:
self.value = connection.ops.Adapter(self.value)
return super(GeomValue, self).as_sql(compiler, connection)
def as_mysql(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_sqlite(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
class GeoFuncWithGeoParam(GeoFunc):
def __init__(self, expression, geom, *expressions, **extra):
if not hasattr(geom, 'srid'):
# Try to interpret it as a geometry input
try:
geom = GEOSGeometry(geom)
except Exception:
raise ValueError("This function requires a geometric parameter.")
if not geom.srid:
raise ValueError("Please provide a geometry attribute with a defined SRID.")
geom = GeomValue(geom)
super(GeoFuncWithGeoParam, self).__init__(expression, geom, *expressions, **extra)
class SQLiteDecimalToFloatMixin(object):
"""
    By default, Decimal values are converted to str by the SQLite backend, which
    is not accepted by the GIS functions, which expect numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super(SQLiteDecimalToFloatMixin, self).as_sql(compiler, connection)
class Area(GeoFunc):
def as_sql(self, compiler, connection):
if connection.ops.oracle:
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
else:
if connection.ops.geography:
# Geography fields support area calculation, returns square meters.
self.output_field = AreaField('sq_m')
elif not self.output_field.geodetic(connection):
# Getting the area units of the geographic field.
units = self.output_field.units_name(connection)
if units:
self.output_field = AreaField(
AreaMeasure.unit_attname(self.output_field.units_name(connection)))
else:
self.output_field = FloatField()
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
return super(Area, self).as_sql(compiler, connection)
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super(AsGeoJSON, self).__init__(*expressions, **extra)
class AsGML(GeoFunc):
geom_param_pos = 1
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(AsGML, self).__init__(*expressions, **extra)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
self.source_expressions.pop(0)
return super(AsKML, self).as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', six.integer_types),
]
super(AsSVG, self).__init__(*expressions, **extra)
class BoundingCircle(GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super(BoundingCircle, self).__init__(*[expression, num_seg], **extra)
class Centroid(GeoFunc):
pass
class Difference(GeoFuncWithGeoParam):
pass
class DistanceResultMixin(object):
def convert_value(self, value, expression, connection, context):
if value is None:
return None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
dist_att = 'm'
else:
units = geo_field.units_name(connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
else:
dist_att = None
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, GeoFuncWithGeoParam):
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super(Distance, self).__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
# Set parameters as geography if base field is geography
for pos, expr in enumerate(
self.source_expressions[self.geom_param_pos + 1:], start=self.geom_param_pos + 1):
if isinstance(expr, GeomValue):
expr.geography = True
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
self.function = 'ST_Distance_Spheroid' # More accurate, resource intensive
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
self.function = 'ST_Distance_Sphere'
return super(Distance, self).as_sql(compiler, connection)
class Envelope(GeoFunc):
pass
class ForceRHR(GeoFunc):
pass
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(GeoHash, self).__init__(*expressions, **extra)
class Intersection(GeoFuncWithGeoParam):
pass
class Length(DistanceResultMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super(Length, self).__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super(Length, self).as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
self.function = 'ST_Length_Spheroid'
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
self.function = connection.ops.length3d
return super(Length, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
if self.spheroid:
self.function = 'GeodesicLength'
else:
self.function = 'GreatCircleLength'
return super(Length, self).as_sql(compiler, connection)
class MemSize(GeoFunc):
output_field_class = IntegerField
class NumGeometries(GeoFunc):
output_field_class = IntegerField
class NumPoints(GeoFunc):
output_field_class = IntegerField
def as_sqlite(self, compiler, connection):
if self.source_expressions[self.geom_param_pos].output_field.geom_type != 'LINESTRING':
raise TypeError("Spatialite NumPoints can only operate on LineString content")
return super(NumPoints, self).as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, GeoFunc):
output_field_class = FloatField
def as_postgresql(self, compiler, connection):
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
self.function = connection.ops.perimeter3d
return super(Perimeter, self).as_sql(compiler, connection)
class PointOnSurface(GeoFunc):
pass
class Reverse(GeoFunc):
pass
class Scale(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super(Scale, self).__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeoFunc):
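    # Accepted argument forms (validated in __init__ below):
    #   SnapToGrid('geom', size)                            -> uniform grid
    #   SnapToGrid('geom', xsize, ysize)                    -> per-axis sizes
    #   SnapToGrid('geom', xsize, ysize, xorigin, yorigin)  -> sizes plus origin
    # The four-argument form is reordered because the backend function expects
    # the origin parameters before the size parameters.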
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super(SnapToGrid, self).__init__(*expressions, **extra)
class SymDifference(GeoFuncWithGeoParam):
pass
class Transform(GeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', six.integer_types),
]
super(Transform, self).__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[self.geom_param_pos + 1].value
def convert_value(self, value, expression, connection, context):
value = super(Transform, self).convert_value(value, expression, connection, context)
if not connection.ops.postgis and not value.srid:
# Some backends do not set the srid on the returning geometry
value.srid = self.srid
return value
class Translate(Scale):
def as_sqlite(self, compiler, connection):
func_name = connection.ops.spatial_function_name(self.name)
if func_name == 'ST_Translate' and len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate (Spatialite >= 3.1)
self.source_expressions.append(Value(0))
elif func_name == 'ShiftCoords' and len(self.source_expressions) > 3:
raise ValueError("This version of Spatialite doesn't support 3D")
return super(Translate, self).as_sqlite(compiler, connection)
class Union(GeoFuncWithGeoParam):
pass
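# Example (editor's sketch, not part of the original module): these classes
# back GeoDjango's function expressions and are used through queryset
# annotations; ``Zipcode`` and ``pnt`` below are placeholder names::
#
#     from django.contrib.gis.db.models.functions import Distance, Transform
#     Zipcode.objects.annotate(
#         d=Distance('poly', pnt),           # distance from a given geometry
#         mercator=Transform('poly', 3857))  # reproject to web mercator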
|
skevy/django
|
refs/heads/master
|
tests/regressiontests/forms/localflavor/br.py
|
89
|
from django.contrib.localflavor.br.forms import (BRZipCodeField,
BRCNPJField, BRCPFField, BRPhoneNumberField, BRStateSelect,
BRStateChoiceField)
from utils import LocalFlavorTestCase
class BRLocalFlavorTests(LocalFlavorTestCase):
def test_BRZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXXX-XXX.']
valid = {
'12345-123': '12345-123',
}
invalid = {
'12345_123': error_format,
'1234-123': error_format,
'abcde-abc': error_format,
'12345-': error_format,
'-123': error_format,
}
self.assertFieldOutput(BRZipCodeField, valid, invalid)
def test_BRCNPJField(self):
error_format = [u'Invalid CNPJ number.']
error_numbersonly = [u'This field requires only numbers.']
valid = {
'64.132.916/0001-88': '64.132.916/0001-88',
'64-132-916/0001-88': '64-132-916/0001-88',
'64132916/0001-88': '64132916/0001-88',
}
invalid = {
'12-345-678/9012-10': error_format,
'12.345.678/9012-10': error_format,
'12345678/9012-10': error_format,
'64.132.916/0001-XX': error_numbersonly,
}
self.assertFieldOutput(BRCNPJField, valid, invalid)
def test_BRCPFField(self):
error_format = [u'Invalid CPF number.']
error_numbersonly = [u'This field requires only numbers.']
error_atmost_chars = [u'Ensure this value has at most 14 characters (it has 15).']
error_atleast_chars = [u'Ensure this value has at least 11 characters (it has 10).']
error_atmost = [u'This field requires at most 11 digits or 14 characters.']
valid = {
'663.256.017-26': '663.256.017-26',
'66325601726': '66325601726',
'375.788.573-20': '375.788.573-20',
'84828509895': '84828509895',
}
invalid = {
'489.294.654-54': error_format,
'295.669.575-98': error_format,
'539.315.127-22': error_format,
'375.788.573-XX': error_numbersonly,
'375.788.573-000': error_atmost_chars,
'123.456.78': error_atleast_chars,
'123456789555': error_atmost,
}
self.assertFieldOutput(BRCPFField, valid, invalid)
def test_BRPhoneNumberField(self):
# TODO: this doesn't test for any invalid inputs.
valid = {
'41-3562-3464': u'41-3562-3464',
'4135623464': u'41-3562-3464',
'41 3562-3464': u'41-3562-3464',
'41 3562 3464': u'41-3562-3464',
'(41) 3562 3464': u'41-3562-3464',
'41.3562.3464': u'41-3562-3464',
'41.3562-3464': u'41-3562-3464',
' (41) 3562.3464': u'41-3562-3464',
}
invalid = {}
self.assertFieldOutput(BRPhoneNumberField, valid, invalid)
def test_BRStateSelect(self):
f = BRStateSelect()
out = u'''<select name="states">
<option value="AC">Acre</option>
<option value="AL">Alagoas</option>
<option value="AP">Amap\xe1</option>
<option value="AM">Amazonas</option>
<option value="BA">Bahia</option>
<option value="CE">Cear\xe1</option>
<option value="DF">Distrito Federal</option>
<option value="ES">Esp\xedrito Santo</option>
<option value="GO">Goi\xe1s</option>
<option value="MA">Maranh\xe3o</option>
<option value="MT">Mato Grosso</option>
<option value="MS">Mato Grosso do Sul</option>
<option value="MG">Minas Gerais</option>
<option value="PA">Par\xe1</option>
<option value="PB">Para\xedba</option>
<option value="PR" selected="selected">Paran\xe1</option>
<option value="PE">Pernambuco</option>
<option value="PI">Piau\xed</option>
<option value="RJ">Rio de Janeiro</option>
<option value="RN">Rio Grande do Norte</option>
<option value="RS">Rio Grande do Sul</option>
<option value="RO">Rond\xf4nia</option>
<option value="RR">Roraima</option>
<option value="SC">Santa Catarina</option>
<option value="SP">S\xe3o Paulo</option>
<option value="SE">Sergipe</option>
<option value="TO">Tocantins</option>
</select>'''
self.assertEqual(f.render('states', 'PR'), out)
def test_BRStateChoiceField(self):
error_invalid = [u'Select a valid brazilian state. That state is not one of the available states.']
valid = {
'AC': 'AC',
'AL': 'AL',
'AP': 'AP',
'AM': 'AM',
'BA': 'BA',
'CE': 'CE',
'DF': 'DF',
'ES': 'ES',
'GO': 'GO',
'MA': 'MA',
'MT': 'MT',
'MS': 'MS',
'MG': 'MG',
'PA': 'PA',
'PB': 'PB',
'PR': 'PR',
'PE': 'PE',
'PI': 'PI',
'RJ': 'RJ',
'RN': 'RN',
'RS': 'RS',
'RO': 'RO',
'RR': 'RR',
'SC': 'SC',
'SP': 'SP',
'SE': 'SE',
'TO': 'TO',
}
invalid = {
'pr': error_invalid,
}
self.assertFieldOutput(BRStateChoiceField, valid, invalid)
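# Example (editor's sketch): the fields exercised above also work outside
# a form; the value comes from the ``valid`` mapping in test_BRCPFField::
#
#     >>> from django.contrib.localflavor.br.forms import BRCPFField
#     >>> BRCPFField().clean('663.256.017-26')
#     u'663.256.017-26'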
|
SilentCircle/sentry
|
refs/heads/master
|
conftest.py
|
3
|
from django.conf import settings
import base64
import os
import os.path
def pytest_configure(config):
import warnings
warnings.filterwarnings('error', '', Warning, r'(sentry|raven)')
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
test_db = os.environ.get('DB', 'sqlite')
if test_db == 'mysql':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sentry',
'USER': 'root',
})
elif test_db == 'postgres':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'sentry',
'OPTIONS': {
'autocommit': True,
}
})
elif test_db == 'sqlite':
settings.DATABASES['default'].update({
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
})
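# Example (editor's sketch): the backend is picked per run through the DB
# environment variable read above, e.g. ``DB=postgres py.test``; when it
# is unset, the in-memory sqlite configuration applies.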
# http://djangosnippets.org/snippets/646/
class InvalidVarException(object):
def __mod__(self, missing):
try:
missing_str = unicode(missing)
except Exception:
missing_str = 'Failed to create string representation'
raise Exception('Unknown template variable %r %s' % (missing, missing_str))
def __contains__(self, search):
if search == '%s':
return True
return False
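# Example (editor's sketch): with InvalidVarException installed as
# TEMPLATE_STRING_IF_INVALID, a template that references an unknown
# variable raises instead of silently rendering an empty string::
#
#     >>> '%s' in InvalidVarException()
#     True
#     >>> InvalidVarException() % 'user'
#     Traceback (most recent call last):
#         ...
#     Exception: Unknown template variable 'user' user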
settings.TEMPLATE_DEBUG = True
# settings.TEMPLATE_STRING_IF_INVALID = InvalidVarException()
# Disable static compiling in tests
settings.STATIC_BUNDLES = {}
# override a few things with our test specifics
settings.INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + (
'tests',
)
settings.SENTRY_KEY = base64.b64encode(os.urandom(40))
settings.SENTRY_PUBLIC = False
# This speeds up the tests considerably; pbkdf2 is, by design, slow.
settings.PASSWORD_HASHERS = [
'django.contrib.auth.hashers.MD5PasswordHasher',
]
# enable draft features
settings.SENTRY_ENABLE_EXPLORE_CODE = True
settings.SENTRY_ENABLE_EXPLORE_USERS = True
|
adw0rd/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/utils/module_loading.py
|
38
|
import os
import sys
import unittest
from zipimport import zipimporter
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
class DefaultLoader(unittest.TestCase):
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module('regressiontests.utils.test_module')
# An importable child
self.assertTrue(module_has_submodule(test_module, 'good_module'))
mod = import_module('regressiontests.utils.test_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.no_such_module')
# Don't be confused by caching of import misses
import types # causes attempted import of regressiontests.utils.types
self.assertFalse(module_has_submodule(sys.modules['regressiontests.utils'], 'types'))
class EggLoader(unittest.TestCase):
def setUp(self):
self.old_path = sys.path[:]
self.egg_dir = '%s/eggs' % os.path.dirname(__file__)
def tearDown(self):
sys.path = self.old_path
sys.path_importer_cache.clear()
sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
sys.modules.pop('egg_module.sub1.sub2.good_module', None)
sys.modules.pop('egg_module.sub1.sub2', None)
sys.modules.pop('egg_module.sub1', None)
sys.modules.pop('egg_module.bad_module', None)
sys.modules.pop('egg_module.good_module', None)
sys.modules.pop('egg_module', None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module.sub1.sub2')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.sub1.sub2.good_module')
self.assertEqual(mod.content, 'Deep Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class TestFinder(object):
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_module(self, path):
importer = self.importer.find_module(path)
if importer is None:
return
return TestLoader(importer)
class TestLoader(object):
def __init__(self, importer):
self.importer = importer
def load_module(self, name):
mod = self.importer.load_module(name)
mod.__loader__ = self
return mod
class CustomLoader(EggLoader):
"""The Custom Loader test is exactly the same as the EggLoader, but
it uses a custom defined Loader and Finder that is intentionally
split into two classes. Although the EggLoader combines both functions
into one class, this isn't required.
"""
def setUp(self):
super(CustomLoader, self).setUp()
sys.path_hooks.insert(0, TestFinder)
sys.path_importer_cache.clear()
def tearDown(self):
super(CustomLoader, self).tearDown()
sys.path_hooks.pop(0)
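# Example (editor's sketch): module_has_submodule answers without importing
# the submodule, which is why the broken children above are still reported
# as present::
#
#     >>> from django.utils.importlib import import_module
#     >>> from django.utils.module_loading import module_has_submodule
#     >>> utils = import_module('django.utils')
#     >>> module_has_submodule(utils, 'module_loading')
#     True
#     >>> module_has_submodule(utils, 'no_such_module')
#     False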
|
pap/nupic
|
refs/heads/master
|
external/linux32/lib/python2.6/site-packages/matplotlib/transforms.py
|
69
|
"""
matplotlib includes a framework for arbitrary geometric
transformations that is used to determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
import numpy as np
from numpy import ma
from matplotlib._path import affine_transform
from numpy.linalg import inv
from weakref import WeakKeyDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
import cbook
from path import Path
from _path import count_bboxes_overlapping_bbox, update_path_extents
DEBUG = False
if DEBUG:
import warnings
MaskedArray = ma.MaskedArray
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
# INVALID_AFFINE
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
# If pass_through is True, all ancestors will always be
# invalidated, even if 'self' is already invalid.
pass_through = False
def __init__(self):
"""
Creates a new :class:`TransformNode`.
"""
# Parents are stored in a WeakKeyDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakKeyDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and all of its
ancestors. Should be called any time the transform changes.
"""
# If we are an affine transform being changed, we can set the
# flag to INVALID_AFFINE
value = self.INVALID_AFFINE if self.is_affine else self.INVALID
# Shortcut: If self is already invalid, that means its parents
# are as well, so we don't need to do anything.
if self._invalid == value:
return
if not len(self._parents):
self._invalid = value
return
# Invalidate all ancestors of self using pseudo-recursion.
stack = [self]
while len(stack):
root = stack.pop()
# Stop at subtrees that have already been invalidated
if root._invalid != value or root.pass_through:
root._invalid = self.INVALID
stack.extend(root._parents.keys())
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[self] = None
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val) for key, val in props.items()])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in root.__dict__.items():
if val is child:
name = key
break
fobj.write('%s -> %s [label="%s", fontsize=10];\n' % (
hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
else:
def write_graphviz(self, fobj, highlight=[]):
return
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1,0] - points[0,0] == 0 or
points[1,1] - points[0,1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates that
define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates that
define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the bottom-left
corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates that
define the bounding box. It is not guaranteed to be the top-right
corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`, :attr:`y1`).""")
def get_points(self):
raise NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y > y0 and y < y1))
or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0,0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container = None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
- 'SE' for bottom-right
- 'E' for right
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, str):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w-W)) - L,
(b + cy * (h-H)) - B])
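# Example (editor's sketch): centring a unit box inside a 4x4 container::
#
#     >>> Bbox.from_bounds(0, 0, 1, 1).anchored(
#     ...     'C', container=Bbox.from_bounds(0, 0, 4, 4)).bounds
#     (1.5, 1.5, 1.0, 1.0)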
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container = None, fig_aspect = 1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
assert box_aspect > 0 and fig_aspect > 0
if container is None:
container = self
w, h = container.size
H = w * box_aspect/fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect/box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
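# Example (editor's sketch): splitting the unit box at the halfway line::
#
#     >>> [b.bounds for b in Bbox.unit().splitx(0.5)]
#     [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]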
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self._points
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
#@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
union = staticmethod(union)
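# Example (editor's sketch): the union of two disjoint boxes spans both::
#
#     >>> Bbox.union([Bbox.from_extents(0, 0, 1, 1),
#     ...             Bbox.from_extents(2, 2, 3, 3)]).bounds
#     (0.0, 0.0, 3.0, 3.0)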
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self)
self._points = np.asarray(points, np.float_)
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
if DEBUG:
___init__ = __init__
def __init__(self, points):
self._check(points)
self.___init__(points)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
_unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
#@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(Bbox._unit_values.copy())
unit = staticmethod(unit)
#@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
from_bounds = staticmethod(from_bounds)
#@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
from_extents = staticmethod(from_extents)
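# Example (editor's sketch): from_bounds and from_extents describe the
# same box in two different ways::
#
#     >>> Bbox.from_bounds(1, 2, 3, 4).bounds
#     (1.0, 2.0, 3.0, 4.0)
#     >>> Bbox.from_extents(1, 2, 4, 6).bounds
#     (1.0, 2.0, 3.0, 4.0)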
def __repr__(self):
return 'Bbox(%s)' % repr(self._points)
__str__ = __repr__
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:,0] = points[:,0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:,1] = points[:,1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l+w, b+h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
assert bbox.is_bbox
assert isinstance(transform, Transform)
assert transform.input_dims == 2
assert transform.output_dims == 2
BboxBase.__init__(self)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%s, %s)" % (self._bbox, self._transform)
__str__ = __repr__
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
if ma.isMaskedArray(points):
points.putmask(0.0)
points = np.asarray(points)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
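# Example (editor's sketch): a TransformedBbox recomputes itself when its
# child bbox (or transform) is invalidated::
#
#     >>> box = Bbox.unit()
#     >>> tbox = TransformedBbox(box, Affine2D().scale(2.0))
#     >>> tbox.bounds
#     (0.0, 0.0, 2.0, 2.0)
#     >>> box.x1 = 3.0    # invalidates tbox through the parent links
#     >>> tbox.bounds
#     (0.0, 0.0, 6.0, 2.0)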
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :meth:`has_inverse` can return True)
If the transform needs to do something non-standard with
:class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
# The number of input and output dimensions for this transform.
# These must be overridden (with integers) in the subclass.
input_dims = None
output_dims = None
# True if this transform has a corresponding inverse transform.
has_inverse = False
# True if this transform is separable in the x- and y- dimensions.
is_separable = False
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __array__(self, *args, **kwargs):
"""
Used by C/C++ -based backends to get at the array matrix data.
"""
return self.frozen().__array__()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
raise NotImplementedError()
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return values
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.transform(values)
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
assert len(point) == self.input_dims
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed copy of path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return Path(self.transform(path.vertices), path.codes)
def transform_path_affine(self, path):
"""
Returns a copy of path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return path
def transform_path_non_affine(self, path):
"""
Returns a copy of path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return Path(self.transform_non_affine(path.vertices), path.codes)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
# pts must be array with 2 columns for x,y
assert pts.shape[1] == 2
# angles must be a column vector and have same number of
# rows as pts
assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[ np.cos(angles), np.sin(angles) ]
# Transform both sets of points
tpts = self.transform( pts )
tpts2 = self.transform( pts2 )
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2( d[:,1], d[:,0] )
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
is_affine = False
def __init__(self, child):
"""
*child*: A :class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
assert isinstance(child, Transform)
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __repr__(self):
return "TransformWrapper(%r)" % self._child
__str__ = __repr__
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.set_children(child)
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
assert child.input_dims == self.input_dims
assert child.output_dims == self.output_dims
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
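# Example (editor's sketch): swapping the child of a TransformWrapper in
# place; anything holding the wrapper sees the new transform::
#
#     >>> wrap = TransformWrapper(Affine2D().scale(2.0))
#     >>> wrap.transform(np.array([[1.0, 1.0]]))
#     array([[ 2.,  2.]])
#     >>> wrap.set(Affine2D().scale(3.0))
#     >>> wrap.transform(np.array([[1.0, 1.0]]))
#     array([[ 3.,  3.]])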
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self):
Transform.__init__(self)
self._inverted = None
def __array__(self, *args, **kwargs):
return self.get_matrix()
#@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
_concat = staticmethod(_concat)
def get_matrix(self):
"""
Get the underlying transformation matrix as a numpy array.
"""
raise NotImplementedError()
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_affine(self, path):
return self.transform_path(path)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
input_dims = 2
output_dims = 2
#* Redundant: Removed for performance
#
# def __init__(self):
# Affine2DBase.__init__(self)
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def __array__(self, *args, **kwargs):
return self.get_matrix()
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
#@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
matrix_from_values = staticmethod(matrix_from_values)
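# Example (editor's sketch): matrix_from_values lays the six values out as
# the 3x3 matrix shown above, and to_values reads them back::
#
#     >>> mtx = Affine2DBase.matrix_from_values(1, 2, 3, 4, 5, 6)
#     >>> Affine2D(mtx).to_values()
#     (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)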
def transform(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform(point, mtx)
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform = transform
def transform(self, points):
# The major speed trap here is just converting the points
# to an array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
% type(points))
return self._transform(points)
transform.__doc__ = AffineBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
self._inverted = Affine2D(inv(mtx))
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix = None):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
__str__ = __repr__
def __cmp__(self, other):
if (isinstance(other, Affine2D) and
(self.get_matrix() == other.get_matrix()).all()):
return 0
return -1
#@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3,3)))
from_values = staticmethod(from_values)
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
assert isinstance(other, Affine2DBase)
self._mtx = other.get_matrix()
self.invalidate()
#@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
identity = staticmethod(identity)
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees*np.pi/180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
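# Example (editor's sketch): because the mutators return *self*, calls
# chain naturally; here a 90-degree rotation followed by a translation::
#
#     >>> t = Affine2D().rotate_deg(90).translate(1.0, 0.0)
#     >>> t.transform(np.array([[1.0, 0.0]]))
#     array([[ 1.,  1.]])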
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
class IdentityTransform(Affine2DBase):
"""
A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
__str__ = __repr__
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return points
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def transform(self, points):
x = self._x
y = self._y
if x is y and x.input_dims == 2:
return x.transform(points)
if x.input_dims == 2:
x_points = x.transform(points)[:, 0:1]
else:
x_points = x.transform(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform(points)[:, 1:]
else:
y_points = y.transform(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
return self.transform(points)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x.is_affine and self._y.is_affine:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
else:
self._affine = IdentityTransform()
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
assert x_transform.is_affine
assert y_transform.is_affine
assert x_transform.is_separable
assert y_transform.is_separable
Transform.__init__(self)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
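# Illustrative sketch (not part of this module): blending picks the
# x-behaviour of the first transform and the y-behaviour of the second.
# Both children here are affine, so the fast BlendedAffine2D is returned.
# (A classic use is blending ax.transData with ax.transAxes.)
_demo_blend = blended_transform_factory(Affine2D().scale(2.0, 2.0),
                                        Affine2D().translate(0.0, 5.0))
assert isinstance(_demo_blend, BlendedAffine2D)
# x is doubled by the first child; y is shifted by the second.
assert np.allclose(_demo_blend.transform(np.array([[1.0, 1.0]])), [[2.0, 6.0]])
del _demo_blend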
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
def __repr__(self):
return "CompositeGenericTransform(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def transform(self, points):
return self._b.transform(
self._a.transform(points))
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self._b.transform_path(
self._a.transform_path(path))
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return self._b.transform_path_affine(
self._a.transform_path(path))
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if self._a.is_affine and self._b.is_affine:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
else:
return self._b.get_affine()
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
assert a.is_affine
assert b.is_affine
Affine2DBase.__init__(self)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
def __repr__(self):
return "CompositeAffine2D(%s, %s)" % (self._a, self._b)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
Shortcut versions of the blended transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, AffineBase) and isinstance(b, AffineBase):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
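# Illustrative sketch (not part of this module): composition applies the
# left operand first; "a + b" goes through this factory. Two affine
# children collapse into a single CompositeAffine2D whose matrix is
# dot(b, a), and identity children are short-circuited away entirely.
_demo_comp = Affine2D().scale(2.0, 2.0) + Affine2D().translate(1.0, 0.0)
# (1, 1) is scaled to (2, 2), then translated to (3, 2).
assert np.allclose(_demo_comp.transform(np.array([[1.0, 1.0]])), [[3.0, 2.0]])
assert composite_transform_factory(IdentityTransform(), _demo_comp) is _demo_comp
del _demo_comp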
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
assert boxin.is_bbox
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%s, %s)" % (self._boxin, self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
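# Illustrative sketch (not part of this module): the matrix built above is
# plain scale-then-offset per axis. Mapping the unit box onto [2, 4] x [3, 7]
# (using the Bbox class defined earlier in this module) sends the midpoint
# (0.5, 0.5) to (3, 5) and the corner (1, 1) to (4, 7).
_demo_bbox = BboxTransform(Bbox.from_extents(0.0, 0.0, 1.0, 1.0),
                           Bbox.from_extents(2.0, 3.0, 4.0, 7.0))
assert np.allclose(_demo_bbox.transform(np.array([[0.5, 0.5], [1.0, 1.0]])),
                   [[3.0, 5.0], [4.0, 7.0]])
del _demo_bbox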
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
assert boxout.is_bbox
Affine2DBase.__init__(self)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%s)" % (self._boxout)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin):
assert boxin.is_bbox
Affine2DBase.__init__(self)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%s)" % (self._boxin)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans):
Affine2DBase.__init__(self)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%s)" % (self._t,)
__str__ = __repr__
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
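# Illustrative sketch (not part of this module): the offset is pushed
# through *scale_trans* before being baked into the matrix, which is how
# physical-unit offsets (e.g. via fig.dpi_scale_trans) are expressed.
# A plain 10x scale stands in for a dpi transform here.
_demo_st = ScaledTranslation(0.5, 0.25, Affine2D().scale(10.0, 10.0))
assert np.allclose(_demo_st.transform(np.array([[0.0, 0.0]])), [[5.0, 2.5]])
del _demo_st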
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
assert isinstance(transform, Transform)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path(self._transform.transform_non_affine(self._path.vertices))
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the path necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._invalid = 0
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Ensure the endpoints of a range are finite and not too close together.
"too close" means the interval is smaller than 'tiny' times
the maximum absolute value.
    If they are too close, each endpoint is moved outward by 'expander'
    times its absolute value, or the pair becomes (-expander, expander)
    when vmin is zero.
    If 'increasing' is True and vmin > vmax, they will be swapped,
    regardless of whether they are too close.
    If either endpoint is inf, -inf or nan, return (-expander, expander).
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
if vmin == 0.0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
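# Illustrative worked examples (not part of this module): non-finite
# endpoints fall back to (-expander, expander); degenerate intervals are
# padded; swapped endpoints are reordered when *increasing* is True.
assert nonsingular(float('nan'), 1.0) == (-0.001, 0.001)
assert nonsingular(0.0, 0.0) == (-0.001, 0.001)
assert nonsingular(1.0, 1.0) == (1.0 - 0.001, 1.0 + 0.001)
assert nonsingular(5.0, 2.0) == (2.0, 5.0)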
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
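# Illustrative sketch (not part of this module): with units='dots' no
# figure is needed, and the result is just the input transform followed
# by a pixel translation.
_demo_off = offset_copy(Affine2D().scale(2.0, 2.0), None, x=3.0, y=4.0,
                        units='dots')
assert np.allclose(_demo_off.transform(np.array([[1.0, 1.0]])), [[5.0, 6.0]])
del _demo_off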
|
avielman/IU-SCRUM
|
refs/heads/master
|
back-end/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
|
1534
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
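# Illustrative sketch (not part of this generator): the emitted dump.json is
# a flat adjacency map from each qualified target (the "path/to/foo.gyp:foo"
# naming is gyp's convention) to its direct dependencies, e.g.
#   {"a.gyp:a#target": ["b.gyp:b#target"], "b.gyp:b#target": []}
# The worklist walk above is equivalent to:
def _CollectEdgesSketch(target_list, target_dicts):
  edges = {}
  to_visit = list(target_list)
  while to_visit:
    target = to_visit.pop()
    if target in edges:
      continue
    deps = target_dicts[target].get('dependencies', [])
    edges[target] = list(deps)
    to_visit.extend(deps)
  return edges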
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/refactoring/inlineFunction/importAs/main.after.py
|
12
|
from source import foo as foo1
foo = 1
x = foo1()
res = x + 2
|
odoousers2014/LibrERP
|
refs/heads/master
|
account_no_dashboard/__init__.py
|
526
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pyblish/pyblish-win
|
refs/heads/master
|
lib/Python27/Lib/json/tests/test_pass3.py
|
145
|
from json.tests import PyTest, CTest
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(object):
def test_parse(self):
# test in/out equivalence and parsing
res = self.loads(JSON)
out = self.dumps(res)
self.assertEqual(res, self.loads(out))
class TestPyPass3(TestPass3, PyTest): pass
class TestCPass3(TestPass3, CTest): pass
|
Motaku/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connections/chroot.py
|
50
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import traceback
import os
import shlex
import subprocess
from ansible import errors
from ansible import utils
from ansible.utils.unicode import to_bytes
from ansible.callbacks import vvv
import ansible.constants as C
BUFSIZE = 65536
class Connection(object):
''' Local chroot based connections '''
def __init__(self, runner, host, port, *args, **kwargs):
self.chroot = host
self.has_pipelining = False
self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("chroot connection requires running as root")
# we're running as root on the local system so do some
# trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
raise errors.AnsibleError("%s is not a directory" % self.chroot)
chrootsh = os.path.join(self.chroot, 'bin/sh')
if not utils.is_executable(chrootsh):
raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
self.chroot_cmd = distutils.spawn.find_executable('chroot')
if not self.chroot_cmd:
raise errors.AnsibleError("chroot command not found in PATH")
self.runner = runner
self.host = host
# port is unused, since this is local
self.port = port
def connect(self, port=None):
''' connect to the chroot; nothing to do here '''
vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
return self
def _generate_cmd(self, executable, cmd):
if executable:
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
else:
# Prev to python2.7.3, shlex couldn't handle unicode type strings
cmd = to_bytes(cmd)
cmd = shlex.split(cmd)
local_cmd = [self.chroot_cmd, self.chroot]
local_cmd += cmd
return local_cmd
def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE):
''' run a command on the chroot. This is only needed for implementing
        put_file() and get_file() so that we don't have to read the whole file
        into memory.
        Compared to exec_command() it loses some niceties like being able to
        return the process's exit code immediately.
'''
if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        # We enter the chroot as root, so privilege escalation is ignored (probably needs fixing in case we have to become a specific user [ex: postgres admin]).
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.chroot)
p = subprocess.Popen(local_cmd, shell=False,
cwd=self.runner.basedir,
stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the chroot '''
p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to chroot '''
vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
try:
with open(in_path, 'rb') as in_file:
try:
p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file)
except OSError:
raise errors.AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
except IOError:
raise errors.AnsibleError("file or module does not exist at: %s" % in_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from chroot to local '''
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
try:
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None)
except OSError:
raise errors.AnsibleError("chroot connection requires dd command in the chroot")
with open(out_path, 'wb+') as out_file:
try:
chunk = p.stdout.read(BUFSIZE)
while chunk:
out_file.write(chunk)
chunk = p.stdout.read(BUFSIZE)
except:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def close(self):
''' terminate the connection; nothing to do here '''
pass
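# Illustrative sketch (not part of this plugin): the command lists built by
# Connection._generate_cmd above. With an explicit executable the command
# string is handed to "sh -c"; without one it is shlex-split and run
# directly inside the chroot. The paths here are hypothetical.
def _generate_cmd_sketch(chroot_cmd, chroot, executable, cmd):
    if executable:
        return [chroot_cmd, chroot, executable, '-c', cmd]
    return [chroot_cmd, chroot] + shlex.split(cmd)
assert _generate_cmd_sketch('chroot', '/srv/jail', '/bin/sh', 'echo hi') == \
    ['chroot', '/srv/jail', '/bin/sh', '-c', 'echo hi']
assert _generate_cmd_sketch('chroot', '/srv/jail', None, 'echo hi') == \
    ['chroot', '/srv/jail', 'echo', 'hi']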
|
mccheung/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/xmlrpc/client.py
|
69
|
#
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from Nicholas Riley)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix)
# 2002-03-17 fl Avoid buffered read when possible (from James Rucker)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from Andrew Kuchling)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by Fredrik Lundh.
#
# info@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
An XML-RPC client interface for Python.
The marshalling and response parser code can also be used to
implement XML-RPC servers.
Exported exceptions:
Error Base class for client errors
ProtocolError Indicates an HTTP protocol error
ResponseError Indicates a broken response package
Fault Indicates an XML-RPC fault package
Exported classes:
ServerProxy Represents a logical connection to an XML-RPC server
MultiCall Executor of boxcared xmlrpc requests
DateTime dateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate a "dateTime.iso8601"
XML-RPC value
Binary binary data wrapper
Marshaller Generate an XML-RPC params chunk from a Python data structure
Unmarshaller Unmarshal an XML-RPC response from incoming XML event message
Transport Handles an HTTP transaction to an XML-RPC server
SafeTransport Handles an HTTPS transaction to an XML-RPC server
Exported constants:
(none)
Exported functions:
getparser Create instance of the fastest available parser & attach
to an unmarshalling object
dumps Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
loads Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
"""
import base64
import sys
import time
from datetime import datetime
import http.client
import urllib.parse
from xml.parsers import expat
import errno
from io import BytesIO
try:
import gzip
except ImportError:
gzip = None #python can be built without zlib/gzip support
# --------------------------------------------------------------------
# Internal stuff
def escape(s):
s = s.replace("&", "&")
s = s.replace("<", "<")
return s.replace(">", ">",)
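# Illustrative sketch (not part of this module): escape() handles exactly
# the three characters that are unconditionally special in XML text.
assert escape("a & b < c > d") == "a &amp; b &lt; c &gt; d"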
# used in User-Agent header sent
__version__ = sys.version[:3]
# xmlrpc integer limits
MAXINT = 2**31-1
MININT = -2**31
# --------------------------------------------------------------------
# Error constants (from Dan Libby's specification at
# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
# Ranges of errors
PARSE_ERROR = -32700
SERVER_ERROR = -32600
APPLICATION_ERROR = -32500
SYSTEM_ERROR = -32400
TRANSPORT_ERROR = -32300
# Specific errors
NOT_WELLFORMED_ERROR = -32700
UNSUPPORTED_ENCODING = -32701
INVALID_ENCODING_CHAR = -32702
INVALID_XMLRPC = -32600
METHOD_NOT_FOUND = -32601
INVALID_METHOD_PARAMS = -32602
INTERNAL_ERROR = -32603
# --------------------------------------------------------------------
# Exceptions
##
# Base class for all kinds of client-side errors.
class Error(Exception):
"""Base class for client errors."""
def __str__(self):
return repr(self)
##
# Indicates an HTTP-level protocol error. This is raised by the HTTP
# transport layer, if the server returns an error code other than 200
# (OK).
#
# @param url The target URL.
# @param errcode The HTTP error code.
# @param errmsg The HTTP error message.
# @param headers The HTTP header dictionary.
class ProtocolError(Error):
"""Indicates an HTTP protocol error."""
def __init__(self, url, errcode, errmsg, headers):
Error.__init__(self)
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
def __repr__(self):
return (
"<ProtocolError for %s: %s %s>" %
(self.url, self.errcode, self.errmsg)
)
##
# Indicates a broken XML-RPC response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response is
# malformed.
class ResponseError(Error):
"""Indicates a broken response package."""
pass
##
# Indicates an XML-RPC fault response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response contains
# a fault string. This exception can also be used as a class, to
# generate a fault XML-RPC message.
#
# @param faultCode The XML-RPC fault code.
# @param faultString The XML-RPC fault string.
class Fault(Error):
"""Indicates an XML-RPC fault package."""
def __init__(self, faultCode, faultString, **extra):
Error.__init__(self)
self.faultCode = faultCode
self.faultString = faultString
def __repr__(self):
return "<Fault %s: %r>" % (self.faultCode, self.faultString)
# --------------------------------------------------------------------
# Special values
##
# Backwards compatibility
boolean = Boolean = bool
##
# Wrapper for XML-RPC DateTime values. This converts a time value to
# the format used by XML-RPC.
# <p>
# The value can be given as a datetime object, as a string in the
# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
# time.localtime()), or an integer value (as returned by time.time()).
# The wrapper uses time.localtime() to convert an integer to a time
# tuple.
#
# @param value The time, given as a datetime object, an ISO 8601 string,
# a time tuple, or an integer time value.
# Issue #13305: different format codes across platforms
_day0 = datetime(1, 1, 1)
if _day0.strftime('%Y') == '0001': # Mac OS X
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S")
elif _day0.strftime('%4Y') == '0001': # Linux
def _iso8601_format(value):
return value.strftime("%4Y%m%dT%H:%M:%S")
else:
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
del _day0
def _strftime(value):
if isinstance(value, datetime):
return _iso8601_format(value)
if not isinstance(value, (tuple, time.struct_time)):
if value == 0:
value = time.time()
value = time.localtime(value)
return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]
class DateTime:
"""DateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate 'dateTime.iso8601' XML-RPC
value.
"""
def __init__(self, value=0):
if isinstance(value, str):
self.value = value
else:
self.value = _strftime(value)
def make_comparable(self, other):
if isinstance(other, DateTime):
s = self.value
o = other.value
elif isinstance(other, datetime):
s = self.value
o = _iso8601_format(other)
elif isinstance(other, str):
s = self.value
o = other
elif hasattr(other, "timetuple"):
s = self.timetuple()
o = other.timetuple()
else:
otype = (hasattr(other, "__class__")
and other.__class__.__name__
or type(other))
raise TypeError("Can't compare %s and %s" %
(self.__class__.__name__, otype))
return s, o
def __lt__(self, other):
s, o = self.make_comparable(other)
return s < o
def __le__(self, other):
s, o = self.make_comparable(other)
return s <= o
def __gt__(self, other):
s, o = self.make_comparable(other)
return s > o
def __ge__(self, other):
s, o = self.make_comparable(other)
return s >= o
def __eq__(self, other):
s, o = self.make_comparable(other)
return s == o
def __ne__(self, other):
s, o = self.make_comparable(other)
return s != o
def timetuple(self):
return time.strptime(self.value, "%Y%m%dT%H:%M:%S")
##
# Get date/time value.
#
# @return Date/time value, as an ISO 8601 string.
def __str__(self):
return self.value
def __repr__(self):
return "<DateTime %r at %x>" % (self.value, id(self))
def decode(self, data):
self.value = str(data).strip()
def encode(self, out):
out.write("<value><dateTime.iso8601>")
out.write(self.value)
out.write("</dateTime.iso8601></value>\n")
def _datetime(data):
# decode xml element contents into a DateTime structure.
value = DateTime()
value.decode(data)
return value
def _datetime_type(data):
return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
##
# Wrapper for binary data. This can be used to transport any kind
# of binary data over XML-RPC, using BASE64 encoding.
#
# @param data An 8-bit string containing arbitrary data.
class Binary:
"""Wrapper for binary data."""
def __init__(self, data=None):
if data is None:
data = b""
else:
if not isinstance(data, (bytes, bytearray)):
raise TypeError("expected bytes or bytearray, not %s" %
data.__class__.__name__)
data = bytes(data) # Make a copy of the bytes!
self.data = data
##
# Get buffer contents.
#
# @return Buffer contents, as an 8-bit string.
def __str__(self):
return str(self.data, "latin-1") # XXX encoding?!
def __eq__(self, other):
if isinstance(other, Binary):
other = other.data
return self.data == other
def __ne__(self, other):
if isinstance(other, Binary):
other = other.data
return self.data != other
def decode(self, data):
self.data = base64.decodebytes(data)
def encode(self, out):
out.write("<value><base64>\n")
encoded = base64.encodebytes(self.data)
out.write(encoded.decode('ascii'))
out.write("</base64></value>\n")
def _binary(data):
# decode xml element contents into a Binary structure
value = Binary()
value.decode(data)
return value
WRAPPERS = (DateTime, Binary)
# --------------------------------------------------------------------
# XML parsers
class ExpatParser:
# fast expat parser for Python 2.0 and later.
def __init__(self, target):
self._parser = parser = expat.ParserCreate(None, None)
self._target = target
parser.StartElementHandler = target.start
parser.EndElementHandler = target.end
parser.CharacterDataHandler = target.data
encoding = None
target.xml(encoding, None)
def feed(self, data):
self._parser.Parse(data, 0)
def close(self):
self._parser.Parse("", 1) # end of data
del self._target, self._parser # get rid of circular references
# --------------------------------------------------------------------
# XML-RPC marshalling and unmarshalling code
##
# XML-RPC marshaller.
#
# @param encoding Default encoding for 8-bit strings. The default
# value is None (interpreted as UTF-8).
# @see dumps
class Marshaller:
"""Generate an XML-RPC params chunk from a Python data structure.
Create a Marshaller instance for each set of parameters, and use
the "dumps" method to convert your data (represented as a tuple)
to an XML-RPC params chunk. To write a fault response, pass a
Fault instance instead. You may prefer to use the "dumps" module
function for this purpose.
"""
# by the way, if you don't understand what's going on in here,
# that's perfectly ok.
def __init__(self, encoding=None, allow_none=False):
self.memo = {}
self.data = None
self.encoding = encoding
self.allow_none = allow_none
dispatch = {}
def dumps(self, values):
out = []
write = out.append
dump = self.__dump
if isinstance(values, Fault):
# fault instance
write("<fault>\n")
dump({'faultCode': values.faultCode,
'faultString': values.faultString},
write)
write("</fault>\n")
else:
# parameter block
# FIXME: the xml-rpc specification allows us to leave out
# the entire <params> block if there are no parameters.
# however, changing this may break older code (including
# old versions of xmlrpclib.py), so this is better left as
# is for now. See @XMLRPC3 for more information. /F
write("<params>\n")
for v in values:
write("<param>\n")
dump(v, write)
write("</param>\n")
write("</params>\n")
result = "".join(out)
return result
def __dump(self, value, write):
try:
f = self.dispatch[type(value)]
except KeyError:
# check if this object can be marshalled as a structure
if not hasattr(value, '__dict__'):
raise TypeError("cannot marshal %s objects" % type(value))
# check if this class is a sub-class of a basic type,
# because we don't know how to marshal these types
# (e.g. a string sub-class)
for type_ in type(value).__mro__:
if type_ in self.dispatch.keys():
raise TypeError("cannot marshal %s objects" % type(value))
# XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
# for the p3yk merge, this should probably be fixed more neatly.
f = self.dispatch["_arbitrary_instance"]
f(self, value, write)
def dump_nil (self, value, write):
if not self.allow_none:
raise TypeError("cannot marshal None unless allow_none is enabled")
write("<value><nil/></value>")
dispatch[type(None)] = dump_nil
def dump_bool(self, value, write):
write("<value><boolean>")
write(value and "1" or "0")
write("</boolean></value>\n")
dispatch[bool] = dump_bool
def dump_long(self, value, write):
if value > MAXINT or value < MININT:
raise OverflowError("int exceeds XML-RPC limits")
write("<value><int>")
write(str(int(value)))
write("</int></value>\n")
dispatch[int] = dump_long
# backward compatible
dump_int = dump_long
def dump_double(self, value, write):
write("<value><double>")
write(repr(value))
write("</double></value>\n")
dispatch[float] = dump_double
def dump_unicode(self, value, write, escape=escape):
write("<value><string>")
write(escape(value))
write("</string></value>\n")
dispatch[str] = dump_unicode
def dump_bytes(self, value, write):
write("<value><base64>\n")
encoded = base64.encodebytes(value)
write(encoded.decode('ascii'))
write("</base64></value>\n")
dispatch[bytes] = dump_bytes
dispatch[bytearray] = dump_bytes
def dump_array(self, value, write):
i = id(value)
if i in self.memo:
raise TypeError("cannot marshal recursive sequences")
self.memo[i] = None
dump = self.__dump
write("<value><array><data>\n")
for v in value:
dump(v, write)
write("</data></array></value>\n")
del self.memo[i]
dispatch[tuple] = dump_array
dispatch[list] = dump_array
def dump_struct(self, value, write, escape=escape):
i = id(value)
if i in self.memo:
raise TypeError("cannot marshal recursive dictionaries")
self.memo[i] = None
dump = self.__dump
write("<value><struct>\n")
for k, v in value.items():
write("<member>\n")
if not isinstance(k, str):
raise TypeError("dictionary key must be string")
write("<name>%s</name>\n" % escape(k))
dump(v, write)
write("</member>\n")
write("</struct></value>\n")
del self.memo[i]
dispatch[dict] = dump_struct
def dump_datetime(self, value, write):
write("<value><dateTime.iso8601>")
write(_strftime(value))
write("</dateTime.iso8601></value>\n")
dispatch[datetime] = dump_datetime
def dump_instance(self, value, write):
# check for special wrappers
if value.__class__ in WRAPPERS:
self.write = write
value.encode(self)
del self.write
else:
# store instance attributes as a struct (really?)
self.dump_struct(value.__dict__, write)
dispatch[DateTime] = dump_instance
dispatch[Binary] = dump_instance
# XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
# for the p3yk merge, this should probably be fixed more neatly.
dispatch["_arbitrary_instance"] = dump_instance
##
# XML-RPC unmarshaller.
#
# @see loads
class Unmarshaller:
"""Unmarshal an XML-RPC response, based on incoming XML event
messages (start, data, end). Call close() to get the resulting
data structure.
Note that this reader is fairly tolerant, and gladly accepts bogus
XML-RPC data without complaining (but not bogus XML).
"""
# and again, if you don't understand what's going on in here,
# that's perfectly ok.
def __init__(self, use_datetime=False, use_builtin_types=False):
self._type = None
self._stack = []
self._marks = []
self._data = []
self._methodname = None
self._encoding = "utf-8"
self.append = self._stack.append
self._use_datetime = use_builtin_types or use_datetime
self._use_bytes = use_builtin_types
def close(self):
# return response tuple and target method
if self._type is None or self._marks:
raise ResponseError()
if self._type == "fault":
raise Fault(**self._stack[0])
return tuple(self._stack)
def getmethodname(self):
return self._methodname
#
# event handlers
def xml(self, encoding, standalone):
self._encoding = encoding
# FIXME: assert standalone == 1 ???
def start(self, tag, attrs):
# prepare to handle this element
if tag == "array" or tag == "struct":
self._marks.append(len(self._stack))
self._data = []
self._value = (tag == "value")
def data(self, text):
self._data.append(text)
def end(self, tag):
# call the appropriate end tag handler
try:
f = self.dispatch[tag]
except KeyError:
pass # unknown tag ?
else:
return f(self, "".join(self._data))
#
# accelerator support
def end_dispatch(self, tag, data):
# dispatch data
try:
f = self.dispatch[tag]
except KeyError:
pass # unknown tag ?
else:
return f(self, data)
#
# element decoders
dispatch = {}
def end_nil (self, data):
self.append(None)
self._value = 0
dispatch["nil"] = end_nil
def end_boolean(self, data):
if data == "0":
self.append(False)
elif data == "1":
self.append(True)
else:
raise TypeError("bad boolean value")
self._value = 0
dispatch["boolean"] = end_boolean
def end_int(self, data):
self.append(int(data))
self._value = 0
dispatch["i4"] = end_int
dispatch["i8"] = end_int
dispatch["int"] = end_int
def end_double(self, data):
self.append(float(data))
self._value = 0
dispatch["double"] = end_double
def end_string(self, data):
if self._encoding:
data = data.decode(self._encoding)
self.append(data)
self._value = 0
dispatch["string"] = end_string
dispatch["name"] = end_string # struct keys are always strings
def end_array(self, data):
mark = self._marks.pop()
# map arrays to Python lists
self._stack[mark:] = [self._stack[mark:]]
self._value = 0
dispatch["array"] = end_array
def end_struct(self, data):
mark = self._marks.pop()
# map structs to Python dictionaries
dict = {}
items = self._stack[mark:]
for i in range(0, len(items), 2):
dict[items[i]] = items[i+1]
self._stack[mark:] = [dict]
self._value = 0
dispatch["struct"] = end_struct
def end_base64(self, data):
value = Binary()
value.decode(data.encode("ascii"))
if self._use_bytes:
value = value.data
self.append(value)
self._value = 0
dispatch["base64"] = end_base64
def end_dateTime(self, data):
value = DateTime()
value.decode(data)
if self._use_datetime:
value = _datetime_type(data)
self.append(value)
dispatch["dateTime.iso8601"] = end_dateTime
def end_value(self, data):
# if we stumble upon a value element with no internal
# elements, treat it as a string element
if self._value:
self.end_string(data)
dispatch["value"] = end_value
def end_params(self, data):
self._type = "params"
dispatch["params"] = end_params
def end_fault(self, data):
self._type = "fault"
dispatch["fault"] = end_fault
def end_methodName(self, data):
if self._encoding:
data = data.decode(self._encoding)
self._methodname = data
self._type = "methodName" # no params
dispatch["methodName"] = end_methodName
## Multicall support
#
class _MultiCallMethod:
# some lesser magic to store calls made to a MultiCall object
# for batch execution
def __init__(self, call_list, name):
self.__call_list = call_list
self.__name = name
def __getattr__(self, name):
return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
def __call__(self, *args):
self.__call_list.append((self.__name, args))
class MultiCallIterator:
"""Iterates over the results of a multicall. Exceptions are
raised in response to xmlrpc faults."""
def __init__(self, results):
self.results = results
def __getitem__(self, i):
item = self.results[i]
if type(item) == type({}):
raise Fault(item['faultCode'], item['faultString'])
elif type(item) == type([]):
return item[0]
else:
raise ValueError("unexpected type in multicall result")
class MultiCall:
"""server -> a object used to boxcar method calls
server should be a ServerProxy object.
Methods can be added to the MultiCall using normal
method call syntax e.g.:
multicall = MultiCall(server_proxy)
multicall.add(2,3)
multicall.get_address("Guido")
To execute the multicall, call the MultiCall object e.g.:
add_result, address = multicall()
"""
def __init__(self, server):
self.__server = server
self.__call_list = []
def __repr__(self):
return "<MultiCall at %x>" % id(self)
__str__ = __repr__
def __getattr__(self, name):
return _MultiCallMethod(self.__call_list, name)
def __call__(self):
marshalled_list = []
for name, args in self.__call_list:
marshalled_list.append({'methodName' : name, 'params' : args})
return MultiCallIterator(self.__server.system.multicall(marshalled_list))
# --------------------------------------------------------------------
# convenience functions
FastMarshaller = FastParser = FastUnmarshaller = None
##
# Create a parser object, and connect it to an unmarshalling instance.
# This function picks the fastest available XML parser.
#
# return A (parser, unmarshaller) tuple.
def getparser(use_datetime=False, use_builtin_types=False):
"""getparser() -> parser, unmarshaller
Create an instance of the fastest available parser, and attach it
to an unmarshalling object. Return both objects.
"""
if FastParser and FastUnmarshaller:
if use_builtin_types:
mkdatetime = _datetime_type
mkbytes = base64.decodebytes
elif use_datetime:
mkdatetime = _datetime_type
mkbytes = _binary
else:
mkdatetime = _datetime
mkbytes = _binary
target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)
parser = FastParser(target)
else:
target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
if FastParser:
parser = FastParser(target)
else:
parser = ExpatParser(target)
return parser, target
##
# Convert a Python tuple or a Fault instance to an XML-RPC packet.
#
# @def dumps(params, **options)
# @param params A tuple or Fault instance.
# @keyparam methodname If given, create a methodCall request for
# this method name.
# @keyparam methodresponse If given, create a methodResponse packet.
# If used with a tuple, the tuple must be a singleton (that is,
# it must contain exactly one element).
# @keyparam encoding The packet encoding.
# @return A string containing marshalled data.
def dumps(params, methodname=None, methodresponse=None, encoding=None,
allow_none=False):
"""data [,options] -> marshalled data
Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
In addition to the data object, the following options can be given
as keyword arguments:
methodname: the method name for a methodCall packet
methodresponse: true to create a methodResponse packet.
If this option is used with a tuple, the tuple must be
a singleton (i.e. it can contain only one element).
encoding: the packet encoding (default is UTF-8)
All byte strings in the data structure are assumed to use the
packet encoding. Unicode strings are automatically converted,
where necessary.
"""
assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
if isinstance(params, Fault):
methodresponse = 1
elif methodresponse and isinstance(params, tuple):
assert len(params) == 1, "response tuple must be a singleton"
if not encoding:
encoding = "utf-8"
if FastMarshaller:
m = FastMarshaller(encoding)
else:
m = Marshaller(encoding, allow_none)
data = m.dumps(params)
if encoding != "utf-8":
xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
else:
xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default
# standard XML-RPC wrappings
if methodname:
# a method call
if not isinstance(methodname, str):
methodname = methodname.encode(encoding)
data = (
xmlheader,
"<methodCall>\n"
"<methodName>", methodname, "</methodName>\n",
data,
"</methodCall>\n"
)
elif methodresponse:
# a method response, or a fault structure
data = (
xmlheader,
"<methodResponse>\n",
data,
"</methodResponse>\n"
)
else:
return data # return as is
return "".join(data)
##
# Convert an XML-RPC packet to a Python object. If the XML-RPC packet
# represents a fault condition, this function raises a Fault exception.
#
# @param data An XML-RPC packet, given as an 8-bit string.
# @return A tuple containing the unpacked data, and the method name
# (None if not present).
# @see Fault
def loads(data, use_datetime=False, use_builtin_types=False):
"""data -> unmarshalled data, method name
Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
If the XML-RPC packet represents a fault condition, this function
raises a Fault exception.
"""
p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
p.feed(data)
p.close()
return u.close(), u.getmethodname()
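# Illustrative sketch (not part of this module): dumps() and loads() are
# inverses on (params, methodname) pairs; the method name here is made up.
_demo_packet = dumps((1, "two"), methodname="demo.echo")
assert loads(_demo_packet) == ((1, "two"), "demo.echo")
del _demo_packet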
##
# Encode a string using the gzip content encoding, as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data the unencoded data
# @return the encoded data
def gzip_encode(data):
"""data -> gzip encoded data
Encode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
f = BytesIO()
gzf = gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1)
gzf.write(data)
gzf.close()
encoded = f.getvalue()
f.close()
return encoded
##
# Decode a string using the gzip content encoding, as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data The encoded data
# @return the unencoded data
# @raises ValueError if data is not correctly coded.
def gzip_decode(data):
"""gzip encoded data -> unencoded data
Decode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
f = BytesIO(data)
gzf = gzip.GzipFile(mode="rb", fileobj=f)
try:
decoded = gzf.read()
except OSError:
raise ValueError("invalid data")
f.close()
gzf.close()
return decoded
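# Illustrative sketch (not part of this module): when the gzip module is
# available the two helpers above are exact inverses.
if gzip:
    assert gzip_decode(gzip_encode(b"hello xmlrpc")) == b"hello xmlrpc"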
##
# Return a decoded file-like object for the gzip encoding
# as described in RFC 1952.
#
# @param response A stream supporting a read() method
# @return a file-like object that the decoded data can be read() from
class GzipDecodedResponse(gzip.GzipFile if gzip else object):
"""a file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
"""
def __init__(self, response):
#response doesn't support tell() and read(), required by
#GzipFile
if not gzip:
raise NotImplementedError
self.io = BytesIO(response.read())
gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io)
def close(self):
gzip.GzipFile.close(self)
self.io.close()
# --------------------------------------------------------------------
# request dispatcher
class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
##
# Standard transport class for XML-RPC over HTTP.
# <p>
# You can create custom transports by subclassing this method, and
# overriding selected methods.
class Transport:
"""Handles an HTTP transaction to an XML-RPC server."""
# client identifier (may be overridden)
user_agent = "Python-xmlrpc/%s" % __version__
#if true, we'll request gzip encoding
accept_gzip_encoding = True
# if positive, encode request using gzip if it exceeds this threshold
    # note that many servers will get confused, so only use it if you know
# that they can decode such a request
encode_threshold = None #None = don't encode
def __init__(self, use_datetime=False, use_builtin_types=False):
self._use_datetime = use_datetime
self._use_builtin_types = use_builtin_types
self._connection = (None, None)
self._extra_headers = []
##
# Send a complete request, and parse the response.
# Retry request if a cached connection has disconnected.
#
# @param host Target host.
    # @param handler Target RPC handler.
# @param request_body XML-RPC request body.
# @param verbose Debugging flag.
# @return Parsed response.
def request(self, host, handler, request_body, verbose=False):
#retry request once if cached connection has gone cold
for i in (0, 1):
try:
return self.single_request(host, handler, request_body, verbose)
except OSError as e:
if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED,
errno.EPIPE):
raise
except http.client.BadStatusLine: #close after we sent request
if i:
raise
def single_request(self, host, handler, request_body, verbose=False):
# issue XML-RPC request
try:
http_conn = self.send_request(host, handler, request_body, verbose)
resp = http_conn.getresponse()
if resp.status == 200:
self.verbose = verbose
return self.parse_response(resp)
except Fault:
raise
except Exception:
#All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
#We got an error response.
#Discard any response data and raise exception
if resp.getheader("content-length", ""):
resp.read()
raise ProtocolError(
host + handler,
resp.status, resp.reason,
dict(resp.getheaders())
)
##
# Create parser.
#
# @return A 2-tuple containing a parser and an unmarshaller.
def getparser(self):
# get parser and unmarshaller
return getparser(use_datetime=self._use_datetime,
use_builtin_types=self._use_builtin_types)
##
# Get authorization info from host parameter
# Host may be a string, or a (host, x509-dict) tuple; if a string,
# it is checked for a "user:pw@host" format, and a "Basic
# Authentication" header is added if appropriate.
#
# @param host Host descriptor (URL or (URL, x509 info) tuple).
# @return A 3-tuple containing (actual host, extra headers,
# x509 info). The header and x509 fields may be None.
def get_host_info(self, host):
x509 = {}
if isinstance(host, tuple):
host, x509 = host
auth, host = urllib.parse.splituser(host)
if auth:
auth = urllib.parse.unquote_to_bytes(auth)
auth = base64.encodebytes(auth).decode("utf-8")
auth = "".join(auth.split()) # get rid of whitespace
extra_headers = [
("Authorization", "Basic " + auth)
]
else:
extra_headers = []
return host, extra_headers, x509
##
# Connect to server.
#
# @param host Target host.
# @return An HTTPConnection object
def make_connection(self, host):
#return an existing connection if possible. This allows
#HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create an HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, http.client.HTTPConnection(chost)
return self._connection[1]
##
# Clear any cached connection object.
# Used in the event of socket errors.
#
def close(self):
if self._connection[1]:
self._connection[1].close()
self._connection = (None, None)
##
# Send HTTP request.
#
# @param host Host descriptor (URL or (URL, x509 info) tuple).
# @param handler Target RPC handler (a path relative to host)
# @param request_body The XML-RPC request body
# @param debug Enable debugging if debug is true.
# @return An HTTPConnection.
def send_request(self, host, handler, request_body, debug):
connection = self.make_connection(host)
headers = self._extra_headers[:]
if debug:
connection.set_debuglevel(1)
if self.accept_gzip_encoding and gzip:
connection.putrequest("POST", handler, skip_accept_encoding=True)
headers.append(("Accept-Encoding", "gzip"))
else:
connection.putrequest("POST", handler)
headers.append(("Content-Type", "text/xml"))
headers.append(("User-Agent", self.user_agent))
self.send_headers(connection, headers)
self.send_content(connection, request_body)
return connection
##
# Send request headers.
# This function provides a useful hook for subclassing
#
# @param connection An HTTPConnection.
# @param headers list of key,value pairs for HTTP headers
def send_headers(self, connection, headers):
for key, val in headers:
connection.putheader(key, val)
##
# Send request body.
# This function provides a useful hook for subclassing
#
# @param connection An HTTPConnection.
# @param request_body XML-RPC request body.
def send_content(self, connection, request_body):
#optionally encode the request
if (self.encode_threshold is not None and
self.encode_threshold < len(request_body) and
gzip):
connection.putheader("Content-Encoding", "gzip")
request_body = gzip_encode(request_body)
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders(request_body)
##
# Parse response.
#
# @param response Stream (an HTTPResponse or file-like object).
# @return Response tuple and target method.
def parse_response(self, response):
# read response data from httpresponse, and parse it
# Check for new http response object, otherwise it is a file object.
if hasattr(response, 'getheader'):
if response.getheader("Content-Encoding", "") == "gzip":
stream = GzipDecodedResponse(response)
else:
stream = response
else:
stream = response
p, u = self.getparser()
while 1:
data = stream.read(1024)
if not data:
break
if self.verbose:
print("body:", repr(data))
p.feed(data)
if stream is not response:
stream.close()
p.close()
return u.close()
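##
# Illustrative sketch, an assumption rather than part of this module:
# the subclassing hooks described above can be used to stamp an extra
# header onto every outgoing request.
class _ExampleHeaderTransport(Transport):
    def send_headers(self, connection, headers):
        Transport.send_headers(self, connection, headers + [("X-Trace", "1")])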
##
# Standard transport class for XML-RPC over HTTPS.
class SafeTransport(Transport):
"""Handles an HTTPS transaction to an XML-RPC server."""
# FIXME: mostly untested
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
if not hasattr(http.client, "HTTPSConnection"):
raise NotImplementedError(
"your version of http.client doesn't support HTTPS")
# create a HTTPS connection object from a host descriptor
# host may be a string, or a (host, x509-dict) tuple
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, http.client.HTTPSConnection(chost,
None, **(x509 or {}))
return self._connection[1]
##
# Standard server proxy. This class establishes a virtual connection
# to an XML-RPC server.
# <p>
# This class is available as ServerProxy and Server. New code should
# use ServerProxy, to avoid confusion.
#
# @def ServerProxy(uri, **options)
# @param uri The connection point on the server.
# @keyparam transport A transport factory, compatible with the
# standard transport class.
# @keyparam encoding The default encoding used for 8-bit strings
# (default is UTF-8).
# @keyparam verbose Use a true value to enable debugging output.
# (printed to standard output).
# @see Transport
class ServerProxy:
"""uri [,options] -> a logical connection to an XML-RPC server
uri is the connection point on the server, given as
scheme://host/target.
The standard implementation always supports the "http" scheme. If
SSL socket support is available, it also supports "https".
If the target part and the slash preceding it are both omitted,
"/RPC2" is assumed.
The following options can be given as keyword arguments:
transport: a transport factory
encoding: the request encoding (default is UTF-8)
All 8-bit strings passed to the server proxy are assumed to use
the given encoding.
"""
def __init__(self, uri, transport=None, encoding=None, verbose=False,
allow_none=False, use_datetime=False, use_builtin_types=False):
# establish a "logical" server connection
# get the url
type, uri = urllib.parse.splittype(uri)
if type not in ("http", "https"):
raise OSError("unsupported XML-RPC protocol")
self.__host, self.__handler = urllib.parse.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
if type == "https":
handler = SafeTransport
else:
handler = Transport
transport = handler(use_datetime=use_datetime,
use_builtin_types=use_builtin_types)
self.__transport = transport
self.__encoding = encoding or 'utf-8'
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, methodname, params):
# call a method on the remote server
request = dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none).encode(self.__encoding)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self.__request, name)
# note: to call a remote object with a non-standard name, use
# result = getattr(server, "strange-python-name")(args)
def __call__(self, attr):
"""A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__
"""
if attr == "close":
return self.__close
elif attr == "transport":
return self.__transport
raise AttributeError("Attribute %r not found" % (attr,))
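##
# Illustrative sketch: the __call__ hook above is how callers reach the
# "close" and "transport" helpers without colliding with the magic
# __getattr__ method dispatch. Runs without any network traffic.
def _example_special_attributes():
    proxy = ServerProxy("http://localhost:8000")
    assert isinstance(proxy("transport"), Transport)
    proxy("close")()  # closing an unused proxy is a harmless no-op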
# compatibility
Server = ServerProxy
# --------------------------------------------------------------------
# test code
if __name__ == "__main__":
# simple test program (from the XML-RPC specification)
# local server, available from Lib/xmlrpc/server.py
server = ServerProxy("http://localhost:8000")
try:
print(server.currentTime.getCurrentTime())
except Error as v:
print("ERROR", v)
multi = MultiCall(server)
multi.getData()
multi.pow(2,9)
multi.add(1,2)
try:
for response in multi():
print(response)
except Error as v:
print("ERROR", v)
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_disk_facts.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Marc Tschapek <marc.tschapek@itelligence.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_disk_facts
version_added: '2.5'
short_description: Show the attached disks and disk information of the target host
description:
- With the module you can retrieve and output detailed information about the attached disks of the target and
its volumes and partitions, if present.
requirements:
- Windows 8.1 / Windows 2012 (NT 6.2)
author:
- Marc Tschapek (@marqelme)
notes:
- In order to understand all the returned properties and values, please visit the following site and open the respective MSFT class
U(https://msdn.microsoft.com/en-us/library/windows/desktop/hh830612.aspx)
'''
EXAMPLES = r'''
- name: get disk facts
win_disk_facts:
- name: output first disk size
debug:
var: ansible_facts.disks[0].size
- name: get disk facts
win_disk_facts:
- name: output second disk serial number
debug:
var: ansible_facts.disks[1].serial_number
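# A further illustrative example; the attribute path follows the RETURN
# documentation below and assumes the first disk carries at least one partition.
- name: get disk facts
  win_disk_facts:
- name: output first partition size of the first disk
  debug:
    var: ansible_facts.disks[0].partitions[0].size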
'''
RETURN = r'''
ansible_facts:
description: Dictionary containing all the detailed information about the disks of the target.
returned: always
type: complex
contains:
total_disks:
description: Count of found disks on the target.
returned: if disks were found
type: int
sample: 3
disks:
description: Detailed information about one particular disk.
returned: if disks were found
type: list
contains:
number:
description: Disk number of the particular disk.
returned: always
type: int
sample: 0
size:
description: Size in Gibibyte of the particular disk.
returned: always
type: string
sample: "100GiB"
bus_type:
description: Bus type of the particular disk.
returned: always
type: string
sample: "SCSI"
friendly_name:
description: Friendly name of the particular disk.
returned: always
type: string
sample: "Red Hat VirtIO SCSI Disk Device"
partition_style:
description: Partition style of the particular disk.
returned: always
type: string
sample: "MBR"
partition_count:
description: Number of partitions on the particular disk.
returned: always
type: int
sample: 4
operational_status:
description: Operational status of the particular disk.
returned: always
type: string
sample: "Online"
sector_size:
description: Sector size in byte of the particular disk.
returned: always
type: string
sample: "512s/byte/bytes/"
read_only:
description: Read only status of the particular disk.
returned: always
type: string
sample: "true"
bootable:
description: Information whether the particular disk is a bootable disk.
returned: always
type: string
sample: "false"
system_disk:
description: Information whether the particular disk is a system disk.
returned: always
type: string
sample: "true"
clustered:
description: Information whether the particular disk is clustered (part of a failover cluster).
returned: always
type: string
sample: "false"
manufacturer:
description: Manufacturer of the particular disk.
returned: always
type: string
sample: "Red Hat"
model:
description: Model specification of the particular disk.
returned: always
type: string
sample: "VirtIO"
firmware_version:
description: Firmware version of the particular disk.
returned: always
type: string
sample: "0001"
location:
description: Location of the particular disk on the target.
returned: always
type: string
sample: "PCIROOT(0)#PCI(0400)#SCSI(P00T00L00)"
serial_number:
description: Serial number of the particular disk on the target.
returned: always
type: string
sample: "b62beac80c3645e5877f"
unique_id:
description: Unique ID of the particular disk on the target.
returned: always
type: string
sample: "3141463431303031"
guid:
description: GUID of the particular disk on the target.
returned: if existent
type: string
sample: "{efa5f928-57b9-47fc-ae3e-902e85fbe77f}"
path:
description: Path of the particular disk on the target.
returned: always
type: string
sample: "\\\\?\\scsi#disk&ven_red_hat&prod_virtio#4&23208fd0&1&000000#{<id>}"
partitions:
description: Detailed information about one particular partition on the specified disk.
returned: if existent
type: list
contains:
number:
description: Number of the particular partition.
returned: always
type: int
sample: 1
size:
description:
- Size in Gibibyte of the particular partition.
- Accurate to three decimal places.
returned: always
type: string
sample: "0.031GiB"
type:
description: Type of the particular partition.
returned: always
type: string
sample: "IFS"
gpt_type:
description: GPT type of the particular partition.
returned: if partition_style property of the particular disk has value "GPT"
type: string
sample: "{e3c9e316-0b5c-4db8-817d-f92df00215ae}"
no_default_driveletter:
description: Information whether the particular partition does not receive a drive letter by default.
returned: if partition_style property of the particular disk has value "GPT"
type: string
sample: "true"
mbr_type:
description: MBR type of the particular partition.
returned: if partition_style property of the particular disk has value "MBR"
type: int
sample: 7
active:
description: Information whether the particular partition is an active partition or not.
returned: if partition_style property of the particular disk has value "MBR"
type: string
sample: "true"
drive_letter:
description: Drive letter of the particular partition.
returned: if existent
type: string
sample: "C"
transition_state:
description: Transition state of the particular partition.
returned: always
type: int
sample: 1
offset:
description: Offset of the particular partition.
returned: always
type: int
sample: 368050176
hidden:
description: Information whether the particular partition is hidden or not.
returned: always
type: string
sample: "true"
shadow_copy:
description: Information whether the particular partition is a shadow copy of another partition.
returned: always
type: string
sample: "false"
guid:
description: GUID of the particular partition.
returned: if existent
type: string
sample: "{302e475c-6e64-4674-a8e2-2f1c7018bf97}"
access_paths:
description: Access paths of the particular partition.
returned: if existent
type: string
sample: "\\\\?\\Volume{85bdc4a8-f8eb-11e6-80fa-806e6f6e6963}\\"
volumes:
description: Detailed information about one particular volume on the specified partition.
returned: if existent
type: list
contains:
size:
description:
- Size in Gibibyte of the particular volume.
- Accurate to three decimal places.
returned: always
type: string
sample: "0,342GiB"
size_remaining:
description:
- Remaining size in Gibibyte of the particular volume.
- Accurate to three decimal places.
returned: always
type: string
sample: "0,146GiB"
type:
description: File system type of the particular volume.
returned: always
type: string
sample: "NTFS"
label:
description: File system label of the particular volume.
returned: always
type: string
sample: "System Reserved"
health_status:
description: Health status of the particular volume.
returned: always
type: string
sample: "Healthy"
drive_type:
description: Drive type of the particular volume.
returned: always
type: string
sample: "Fixed"
allocation_unit_size:
description: Allocation unit size in Kibibyte of the particular volume.
returned: always
type: string
sample: "64KiB"
object_id:
description: Object ID of the particular volume.
returned: always
type: string
sample: "\\\\?\\Volume{85bdc4a9-f8eb-11e6-80fa-806e6f6e6963}\\"
path:
description: Path of the particular volume.
returned: always
type: string
sample: "\\\\?\\Volume{85bdc4a9-f8eb-11e6-80fa-806e6f6e6963}\\"
physical_disk:
description: Detailed information about physical disk properties of the particular disk.
returned: if existent
type: complex
contains:
media_type:
description: Media type of the particular physical disk.
returned: always
type: string
sample: "UnSpecified"
size:
description:
- Size in Gibibyte of the particular physical disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "200GiB"
allocated_size:
description:
- Allocated size in Gibibyte of the particular physical disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "100GiB"
device_id:
description: Device ID of the particular physical disk.
returned: always
type: string
sample: "0"
friendly_name:
description: Friendly name of the particular physical disk.
returned: always
type: string
sample: "PhysicalDisk0"
operational_status:
description: Operational status of the particular physical disk.
returned: always
type: string
sample: "OK"
health_status:
description: Health status of the particular physical disk.
returned: always
type: string
sample: "Healthy"
bus_type:
description: Bus type of the particular physical disk.
returned: always
type: string
sample: "SCSI"
usage_type:
description: Usage type of the particular physical disk.
returned: always
type: string
sample: "Auto-Select"
supported_usages:
description: Supported usage types of the particular physical disk.
returned: always
type: complex
contains:
Count:
description: Count of supported usage types.
returned: always
type: int
sample: 5
value:
description: List of supported usage types.
returned: always
type: string
sample: "Auto-Select, Hot Spare"
spindle_speed:
description: Spindle speed in rpm of the particular physical disk.
returned: always
type: string
sample: "4294967295rpm"
physical_location:
description: Physical location of the particular physical disk.
returned: always
type: string
sample: "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 0"
manufacturer:
description: Manufacturer of the particular physical disk.
returned: always
type: string
sample: "SUSE"
model:
description: Model of the particular physical disk.
returned: always
type: string
sample: "Xen Block"
can_pool:
description: Information whether the particular physical disk can be added to a storage pool.
returned: always
type: string
sample: "false"
cannot_pool_reason:
description: Information why the particular physical disk cannot be added to a storage pool.
returned: if can_pool property has value false
type: string
sample: "Insufficient Capacity"
indication_enabled:
description: Information whether indication is enabled for the particular physical disk.
returned: always
type: string
sample: "True"
partial:
description: Information whether the particular physical disk is partial.
returned: always
type: string
sample: "False"
serial_number:
description: Serial number of the particular physical disk.
returned: always
type: string
sample: "b62beac80c3645e5877f"
object_id:
description: Object ID of the particular physical disk.
returned: always
type: string
sample: '{1}\\\\HOST\\root/Microsoft/Windows/Storage/Providers_v2\\SPACES_PhysicalDisk.ObjectId=\"{<object_id>}:PD:{<pd>}\"'
unique_id:
description: Unique ID of the particular physical disk.
returned: always
type: string
sample: "3141463431303031"
virtual_disk:
description: Detailed information about virtual disk properties of the particular disk.
returned: if existent
type: complex
contains:
size:
description:
- Size in Gibibyte of the particular virtual disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "300GiB"
allocated_size:
description:
- Allocated size in Gibibyte of the particular virtual disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "100GiB"
footprint_on_pool:
description:
- Footprint on pool in Gibibyte of the particular virtual disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "100GiB"
name:
description: Name of the particular virtual disk.
returned: always
type: string
sample: "vDisk1"
friendly_name:
description: Friendly name of the particular virtual disk.
returned: always
type: string
sample: "Prod2 Virtual Disk"
operational_status:
description: Operational status of the particular virtual disk.
returned: always
type: string
sample: "OK"
health_status:
description: Health status of the particular virtual disk.
returned: always
type: string
sample: "Healthy"
provisioning_type:
description: Provisioning type of the particular virtual disk.
returned: always
type: string
sample: "Thin"
allocation_unit_size:
description: Allocation unit size in Kibibyte of the particular virtual disk.
returned: always
type: string
sample: "4KiB"
media_type:
description: Media type of the particular virtual disk.
returned: always
type: string
sample: "Unspecified"
parity_layout:
description: Parity layout of the particular virtual disk.
returned: if existent
type: int
sample: 1
access:
description: Access of the particular virtual disk.
returned: always
type: string
sample: "Read/Write"
detached_reason:
description: Detached reason of the particular virtual disk.
returned: always
type: string
sample: "None"
write_cache_size:
description: Write cache size in byte of the particular virtual disk.
returned: always
type: string
sample: "100s/byte/bytes/"
fault_domain_awareness:
description: Fault domain awareness of the particular virtual disk.
returned: always
type: string
sample: "PhysicalDisk"
inter_leave:
description:
- Inter leave in Kibibyte of the particular virtual disk.
- Accurate to three decimal places.
returned: always
type: string
sample: "100KiB"
deduplication_enabled:
description: Information whether deduplication is enabled for the particular virtual disk.
returned: always
type: string
sample: "True"
enclosure_aware:
description: Information whether the particular virtual disk is enclosure aware.
returned: always
type: string
sample: "False"
manual_attach:
description: Information whether the particular virtual disk is manual attached.
returned: always
type: string
sample: "True"
snapshot:
description: Information whether the particular virtual disk is a snapshot.
returned: always
type: string
sample: "False"
tiered:
description: Information whether the particular virtual disk is tiered.
returned: always
type: string
sample: "True"
physical_sector_size:
description: Physical sector size in Kibibyte of the particular virtual disk.
returned: always
type: string
sample: "4KiB"
logical_sector_size:
description: Logical sector size in byte of the particular virtual disk.
returned: always
type: string
sample: "512s/byte/bytes/"
available_copies:
description: Number of the available copies of the particular virtual disk.
returned: if existent
type: int
sample: 1
columns:
description: Number of the columns of the particular virtual disk.
returned: always
type: int
sample: 2
groups:
description: Number of the groups of the particular virtual disk.
returned: always
type: int
sample: 1
physical_disk_redundancy:
description: Type of the physical disk redundancy of the particular virtual disk.
returned: always
type: int
sample: 1
read_cache_size:
description: Read cache size in byte of the particular virtual disk.
returned: always
type: int
sample: 0
request_no_spof:
description: Information whether the particular virtual disk requests no single point of failure.
returned: always
type: string
sample: "True"
resiliency_setting_name:
description: Type of the physical disk redundancy of the particular virtual disk.
returned: always
type: int
sample: 1
object_id:
description: Object ID of the particular virtual disk.
returned: always
type: string
sample: '{1}\\\\HOST\\root/Microsoft/Windows/Storage/Providers_v2\\SPACES_VirtualDisk.ObjectId=\"{<object_id>}:VD:{<vd>}\"'
unique_id:
description: Unique ID of the particular virtual disk.
returned: always
type: string
sample: "260542E4C6B01D47A8FA7630FD90FFDE"
unique_id_format:
description: Unique ID format of the particular virtual disk.
returned: always
type: string
sample: "Vendor Specific"
'''
|
kaaustubh/pjsip
|
refs/heads/master
|
tests/pjsua/scripts-sendto/140_sdp_with_direction_attr_in_session_1.py
|
58
|
# $Id: 140_sdp_with_direction_attr_in_session_1.py 3086 2010-02-03 14:43:25Z nanang $
import inc_sip as sip
import inc_sdp as sdp
# Offer contains "sendonly" attribute in the session. Answer should
# respond with appropriate direction in session or media
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=-
c=IN IP4 127.0.0.1
t=0 0
a=sendonly
m=audio 5000 RTP/AVP 0
"""
pjsua_args = "--null-audio --auto-answer 200"
extra_headers = ""
include = ["Content-Type: application/sdp", # response must include SDP
"a=recvonly"
]
exclude = []
sendto_cfg = sip.SendtoCfg("SDP direction in session", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
|
le9i0nx/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/redshift_facts.py
|
52
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redshift_facts
author: "Jens Carl (@j-carl)"
short_description: Gather facts about Redshift cluster(s)
description:
- Gather facts about Redshift cluster(s)
version_added: "2.4"
requirements: [ boto3 ]
options:
cluster_identifier:
description:
- The prefix of the cluster identifier of the Redshift cluster you are searching for.
- "This is a regular expression match with implicit '^'. Append '$' for a complete match."
required: false
aliases: ['name', 'identifier']
tags:
description:
- "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' }
to match against the security group(s) you are searching for."
required: false
extends_documentation_fragment:
- ec2
- aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS guide for details.
# Find all clusters
- redshift_facts:
register: redshift
# Find cluster(s) with matching tags
- redshift_facts:
tags:
env: prd
stack: monitoring
register: redshift_tags
# Find cluster(s) with matching name/prefix and tags
- redshift_facts:
tags:
env: dev
stack: web
name: user-
register: redshift_web
# Fail if no clusters are found
- redshift_facts:
tags:
env: stg
stack: db
register: redshift_user
failed_when: "{{ redshift_user.results | length == 0 }}"
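# Find cluster by exact identifier (illustrative; appending '$' turns the
# implicit-'^' prefix match described above into a complete match)
- redshift_facts:
    name: "redshift-cluster-1$"
  register: redshift_exact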
'''
RETURN = '''
# For more information see U(http://boto3.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters)
---
cluster_identifier:
description: Unique key to identify the cluster.
returned: success
type: string
sample: "redshift-identifier"
node_type:
description: The node type for nodes in the cluster.
returned: success
type: string
sample: "ds2.xlarge"
cluster_status:
description: Current state of the cluster.
returned: success
type: string
sample: "available"
modify_status:
description: The status of a modify operation.
returned: optional
type: string
sample: ""
master_username:
description: The master user name for the cluster.
returned: success
type: string
sample: "admin"
db_name:
description: The name of the initial database that was created when the cluster was created.
returned: success
type: string
sample: "dev"
endpoint:
description: The connection endpoint.
returned: success
type: string
sample: {
"address": "cluster-ds2.ocmugla0rf.us-east-1.redshift.amazonaws.com",
"port": 5439
}
cluster_create_time:
description: The date and time that the cluster was created.
returned: success
type: string
sample: "2016-05-10T08:33:16.629000+00:00"
automated_snapshot_retention_period:
description: The number of days that automatic cluster snapshots are retained.
returned: success
type: int
sample: 1
cluster_security_groups:
description: A list of cluster security groups that are associated with the cluster.
returned: success
type: list
sample: []
vpc_security_groups:
description: A list of VPC security groups that are associated with the cluster.
returned: success
type: list
sample: [
{
"status": "active",
"vpc_security_group_id": "sg-12cghhg"
}
]
cluster_parameter_groups:
description: The list of cluster parameters that are associated with this cluster.
returned: success
type: list
sample: [
{
"cluster_parameter_status_list": [
{
"parameter_apply_status": "in-sync",
"parameter_name": "statement_timeout"
},
{
"parameter_apply_status": "in-sync",
"parameter_name": "require_ssl"
}
],
"parameter_apply_status": "in-sync",
"parameter_group_name": "tuba"
}
]
cluster_subnet_group_name:
description: The name of the subnet group that is associated with the cluster.
returned: success
type: string
sample: "redshift-subnet"
vpc_id:
description: The identifier of the VPC the cluster is in, if the cluster is in a VPC.
returned: success
type: string
sample: "vpc-1234567"
availability_zone:
description: The name of the Availability Zone in which the cluster is located.
returned: success
type: string
sample: "us-east-1b"
preferred_maintenance_window:
description: The weekly time range, in Universal Coordinated Time (UTC), during which system maintenance can occur.
returned: success
type: string
sample: "tue:07:30-tue:08:00"
pending_modified_values:
description: A value that, if present, indicates that changes to the cluster are pending.
returned: success
type: dict
sample: {}
cluster_version:
description: The version ID of the Amazon Redshift engine that is running on the cluster.
returned: success
type: string
sample: "1.0"
allow_version_upgrade:
description: >
A Boolean value that, if true, indicates that major version upgrades will be applied
automatically to the cluster during the maintenance window.
returned: success
type: boolean
sample: true|false
number_of_nodes:
description: The number of compute nodes in the cluster.
returned: success
type: int
sample: 12
publicly_accessible:
description: A Boolean value that, if true, indicates that the cluster can be accessed from a public network.
returned: success
type: boolean
sample: true|false
encrypted:
description: Boolean value that, if true, indicates that data in the cluster is encrypted at rest.
returned: success
type: boolean
sample: true|false
restore_status:
description: A value that describes the status of a cluster restore action.
returned: success
type: dict
sample: {}
hsm_status:
description: >
A value that reports whether the Amazon Redshift cluster has finished applying any hardware
security module (HSM) settings changes specified in a modify cluster command.
returned: success
type: dict
sample: {}
cluster_snapshot_copy_status:
description: A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
returned: success
type: dict
sample: {}
cluster_public_keys:
description: The public key for the cluster.
returned: success
type: string
sample: "ssh-rsa anjigfam Amazon-Redshift\n"
cluster_nodes:
description: The nodes in the cluster.
returned: success
type: list
sample: [
{
"node_role": "LEADER",
"private_ip_address": "10.0.0.1",
"public_ip_address": "x.x.x.x"
},
{
"node_role": "COMPUTE-1",
"private_ip_address": "10.0.0.3",
"public_ip_address": "x.x.x.x"
}
]
elastic_ip_status:
description: The status of the elastic IP (EIP) address.
returned: success
type: dict
sample: {}
cluster_revision_number:
description: The specific revision number of the database in the cluster.
returned: success
type: string
sample: "1231"
tags:
description: The list of tags for the cluster.
returned: success
type: list
sample: []
kms_key_id:
description: The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
returned: success
type: string
sample: ""
enhanced_vpc_routing:
description: An option that specifies whether to create the cluster with enhanced VPC routing enabled.
returned: success
type: boolean
sample: true|false
iam_roles:
description: List of IAM roles attached to the cluster.
returned: success
type: list
sample: []
'''
import re
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
pass # will be picked up from imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, boto3_conn, get_aws_connection_info
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict
def match_tags(tags_to_match, cluster):
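# Returns True as soon as any one of the requested (key, value) pairs is
# present on the cluster; it does not require every pair to match.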
for key, value in tags_to_match.items():
for tag in cluster['Tags']:
if key == tag['Key'] and value == tag['Value']:
return True
return False
def find_clusters(conn, module, identifier=None, tags=None):
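# Fetch every cluster via the paginator first, then filter client-side by
# identifier prefix and/or tags.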
try:
cluster_paginator = conn.get_paginator('describe_clusters')
clusters = cluster_paginator.paginate().build_full_result()
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
matched_clusters = []
if identifier is not None:
identifier_prog = re.compile('^' + identifier)
for cluster in clusters['Clusters']:
matched_identifier = True
if identifier:
matched_identifier = identifier_prog.search(cluster['ClusterIdentifier'])
matched_tags = True
if tags:
matched_tags = match_tags(tags, cluster)
if matched_identifier and matched_tags:
matched_clusters.append(camel_dict_to_snake_dict(cluster))
return matched_clusters
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
cluster_identifier=dict(type='str', aliases=['identifier', 'name']),
tags=dict(type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
cluster_identifier = module.params.get('cluster_identifier')
cluster_tags = module.params.get('tags')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
redshift = boto3_conn(module, conn_type='client', resource='redshift', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
results = find_clusters(redshift, module, identifier=cluster_identifier, tags=cluster_tags)
module.exit_json(results=results)
if __name__ == '__main__':
main()
|
vvv1559/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/OptionalParameterType.py
|
31
|
def open(encoding: str = None, errors: str = None):
pass
<the_ref>open()
|
jonboy11171/support-tools
|
refs/heads/master
|
googlecode-issues-exporter/bitbucket_issue_converter_test.py
|
90
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BitBucket Services."""
# pylint: disable=missing-docstring,protected-access
import unittest
import bitbucket_issue_converter
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_COMMENT
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
# The BitBucket username.
BITBUCKET_USERNAME = DEFAULT_USERNAME
# The BitBucket repo name.
BITBUCKET_REPO = "repo"
class TestUserService(unittest.TestCase):
"""Tests for the UserService."""
def setUp(self):
self._bitbucket_user_service = bitbucket_issue_converter.UserService()
def testIsUser123(self):
is_user = self._bitbucket_user_service.IsUser("username123")
self.assertTrue(is_user)
def testIsUser321(self):
is_user = self._bitbucket_user_service.IsUser("username321")
self.assertTrue(is_user)
class TestIssueService(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
self.maxDiff = None
def testCreateIssue(self):
issue_body = {
"assignee": "default_username",
"content": (
"```\none\n```\n\nReported by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n"),
"content_updated_on": "last month",
"created_on": "last year",
"id": 1,
"kind": "bug",
"priority": "minor",
"reporter": None,
"status": "resolved",
"title": "issue_title",
"updated_on": "last year",
}
issue_number = self._bitbucket_issue_service.CreateIssue(SINGLE_ISSUE)
self.assertEqual(1, issue_number)
actual = self._bitbucket_issue_service._bitbucket_issues[0]
# The comment body gets rewritten to preserve the origin ID.
issue_body["content"] = (
"Originally reported on Google Code with ID 1\n" + issue_body["content"])
self.assertEqual(issue_body, actual)
def testCloseIssue(self):
# no-op
self._bitbucket_issue_service.CloseIssue(123)
# TODO(chris): Add testcase for an issue comment with attachments.
def testCreateComment(self):
comment_body = {
"content": (
"```\none\n```\n\nReported by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n"),
"created_on": "last year",
"id": 1,
"issue": 1,
"updated_on": "last year",
"user": "a_uthor",
}
self._bitbucket_issue_service.CreateComment(
1, SINGLE_COMMENT)
actual = self._bitbucket_issue_service._bitbucket_comments[0]
self.assertEqual(comment_body, actual)
class TestIssueExporter(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self._bitbucket_user_service = bitbucket_issue_converter.UserService()
self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
self.issue_exporter = issues.IssueExporter(
self._bitbucket_issue_service, self._bitbucket_user_service,
NO_ISSUE_DATA, BITBUCKET_REPO, USER_MAP)
self.issue_exporter.Init()
def testCreateIssue(self):
issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
self.assertEqual(1, issue_number)
def testCreateComments(self):
self.assertEqual(0, self.issue_exporter._comment_number)
self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
self.assertEqual(4, self.issue_exporter._comment_number)
def testStart(self):
self.issue_exporter._issue_json_data = [
{
"id": "1",
"title": "Title1",
"state": "open",
"status": "New",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
},
"labels": ["Type-Issue", "Priority-High"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User1"
},
"published": "last year",
"updated": "last month",
},
{
"id": "2",
"title": "Title2",
"state": "closed",
"status": "Fixed",
"owner": {"kind": "projecthosting#issuePerson",
"name": "User2"
},
"labels": [],
"comments": {
"items": [COMMENT_ONE],
},
"published": "last month",
"updated": "last week",
},
{
"id": "3",
"title": "Title3",
"state": "closed",
"status": "WontFix",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO],
},
"labels": ["Type-Defect"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User3"
},
"published": "last week",
"updated": "yesterday",
}]
self.issue_exporter.Init()
self.issue_exporter.Start()
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
# Comment counts are per issue and should match the numbers from the last
# issue created, minus one for the first comment, which is really
# the issue description.
self.assertEqual(1, self.issue_exporter._comment_number)
self.assertEqual(1, self.issue_exporter._comment_total)
if __name__ == "__main__":
unittest.main(buffer=True)
|
tarzan0820/odoo
|
refs/heads/8.0
|
addons/website_blog/controllers/main.py
|
132
|
# -*- coding: utf-8 -*-
import datetime
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.controllers.main import login_redirect
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
def __init__(self, path='', path_args=None, **args):
self.path = path
self.args = args
self.path_args = set(path_args or [])
def __call__(self, path=None, path_args=None, **kw):
path = path or self.path
for k, v in self.args.items():
kw.setdefault(k, v)
path_args = set(path_args or []).union(self.path_args)
paths, fragments = [], []
for key, value in kw.items():
if value and key in path_args:
if isinstance(value, browse_record):
paths.append((key, slug(value)))
else:
paths.append((key, value))
elif value:
if isinstance(value, list) or isinstance(value, set):
fragments.append(werkzeug.url_encode([(key, item) for item in value]))
else:
fragments.append(werkzeug.url_encode([(key, value)]))
for key, value in paths:
path += '/' + key + '/%s' % value
if fragments:
path += '?' + '&'.join(fragments)
return path
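# Illustrative sketch (documentation only, not used by the controllers):
# keys listed in path_args become URL path segments, everything else is
# appended to the query string.
def _example_query_url():
    url = QueryURL('/blog', ['tag'], tag='news', date_begin='2014-01-01')
    return url()  # -> '/blog/tag/news?date_begin=2014-01-01'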
class WebsiteBlog(http.Controller):
_blog_post_per_page = 20
_post_comment_per_page = 10
def nav_list(self):
blog_post_obj = request.registry['blog.post']
groups = blog_post_obj.read_group(
request.cr, request.uid, [], ['name', 'create_date'],
groupby="create_date", orderby="create_date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route([
'/blog',
'/blog/page/<int:page>',
], type='http', auth="public", website=True)
def blogs(self, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_obj = request.registry['blog.post']
total = blog_obj.search(cr, uid, [], count=True, context=context)
pager = request.website.pager(
url='/blog',
total=total,
page=page,
step=self._blog_post_per_page,
)
post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
posts = blog_obj.browse(cr, uid, post_ids, context=context)
blog_url = QueryURL('', ['blog', 'tag'])
return request.website.render("website_blog.latest_blogs", {
'posts': posts,
'pager': pager,
'blog_url': blog_url,
})
@http.route([
'/blog/<model("blog.blog"):blog>',
'/blog/<model("blog.blog"):blog>/page/<int:page>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>/page/<int:page>',
], type='http', auth="public", website=True)
def blog(self, blog=None, tag=None, page=1, **opt):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog': current blog
- 'blogs': all blogs for navigation
- 'pager': pager of posts
- 'tag': current tag
- 'tags': all tags, for navigation
- 'nav_list': a dict [year][month] for archives navigation
- 'date': date_begin optional parameter, used in archives navigation
- 'blog_url': help object to create URLs
"""
date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
cr, uid, context = request.cr, request.uid, request.context
blog_post_obj = request.registry['blog.post']
blog_obj = request.registry['blog.blog']
blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
domain = []
if blog:
domain += [('blog_id', '=', blog.id)]
if tag:
domain += [('tag_ids', 'in', tag.id)]
if date_begin and date_end:
domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
post_url = QueryURL('', ['blogpost'], tag_id=tag and tag.id or None, date_begin=date_begin, date_end=date_end)
blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
pager = request.website.pager(
url=blog_url(),
total=len(blog_posts),
page=page,
step=self._blog_post_per_page,
)
pager_begin = (page - 1) * self._blog_post_per_page
pager_end = page * self._blog_post_per_page
blog_posts = blog_posts[pager_begin:pager_end]
tags = blog.all_tags()[blog.id]
values = {
'blog': blog,
'blogs': blogs,
'tags': tags,
'tag': tag,
'blog_posts': blog_posts,
'pager': pager,
'nav_list': self.nav_list(),
'blog_url': blog_url,
'post_url': post_url,
'date': date_begin,
}
response = request.website.render("website_blog.blog_post_short", values)
return response
@http.route([
'''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
], type='http', auth="public", website=True)
def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog_post': browse of the current post
- 'blog': browse of the current blog
- 'blogs': list of browse records of blogs
- 'tag': current tag, if tag_id in parameters
- 'tags': all tags, for tag-based navigation
- 'pager': a pager on the comments
- 'nav_list': a dict [year][month] for archives navigation
- 'next_post': next blog post, to direct the user towards the next interesting post
"""
cr, uid, context = request.cr, request.uid, request.context
tag_obj = request.registry['blog.tag']
blog_post_obj = request.registry['blog.post']
date_begin, date_end = post.get('date_begin'), post.get('date_end')
pager_url = "/blogpost/%s" % blog_post.id
pager = request.website.pager(
url=pager_url,
total=len(blog_post.website_message_ids),
page=page,
step=self._post_comment_per_page,
scope=7
)
pager_begin = (page - 1) * self._post_comment_per_page
pager_end = page * self._post_comment_per_page
comments = blog_post.website_message_ids[pager_begin:pager_end]
tag = None
if tag_id:
tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
post_url = QueryURL('', ['blogpost'], blogpost=blog_post, tag_id=tag_id, date_begin=date_begin, date_end=date_end)
blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
if not blog_post.blog_id.id == blog.id:
return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)
# Find next Post
all_post_ids = blog_post_obj.search(cr, uid, [('blog_id', '=', blog.id)], context=context)
# should always return at least the current post
current_blog_post_index = all_post_ids.index(blog_post.id)
next_post_id = all_post_ids[0 if current_blog_post_index == len(all_post_ids) - 1 \
else current_blog_post_index + 1]
next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id, context=context) or False
values = {
'tags': tags,
'tag': tag,
'blog': blog,
'blog_post': blog_post,
'main_object': blog_post,
'nav_list': self.nav_list(),
'enable_editor': enable_editor,
'next_post': next_post,
'date': date_begin,
'post_url': post_url,
'blog_url': blog_url,
'pager': pager,
'comments': comments,
}
response = request.website.render("website_blog.blog_post_complete", values)
request.session[request.session_id] = request.session.get(request.session_id, [])
if not (blog_post.id in request.session[request.session_id]):
request.session[request.session_id].append(blog_post.id)
# Increase counter
blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
'visits': blog_post.visits+1,
},context=context)
return response
def _blog_post_message(self, user, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_post = request.registry['blog.post']
partner_obj = request.registry['res.partner']
if uid != request.website.user_id.id:
partner_ids = [user.partner_id.id]
else:
partner_ids = blog_post._find_partner_from_emails(
cr, SUPERUSER_ID, 0, [post.get('email')], context=context)
if not partner_ids or not partner_ids[0]:
partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': post.get('name'), 'email': post.get('email')}, context=context)]
message_id = blog_post.message_post(
cr, SUPERUSER_ID, int(blog_post_id),
body=post.get('comment'),
type='comment',
subtype='mt_comment',
author_id=partner_ids[0],
path=post.get('path', False),
context=context)
return message_id
@http.route(['/blogpost/comment'], type='http', auth="public", website=True)
def blog_post_comment(self, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
if not request.session.uid:
return login_redirect()
if post.get('comment'):
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
blog_post = request.registry['blog.post']
blog_post.check_access_rights(cr, uid, 'read')
self._blog_post_message(user, blog_post_id, **post)
blog_post = request.registry['blog.post'].browse(cr, uid, int(blog_post_id), context=context)
return werkzeug.utils.redirect("/blog/%s/post/%s#comments" % (slug(blog_post.blog_id), slug(blog_post)))
def _get_discussion_detail(self, ids, publish=False, **post):
cr, uid, context = request.cr, request.uid, request.context
values = []
mail_obj = request.registry.get('mail.message')
for message in mail_obj.browse(cr, SUPERUSER_ID, ids, context=context):
values.append({
"id": message.id,
"author_name": message.author_id.name,
"author_image": message.author_id.image and \
("data:image/png;base64,%s" % message.author_id.image) or \
'/website_blog/static/src/img/anonymous.png',
"date": message.date,
'body': html2plaintext(message.body),
'website_published' : message.website_published,
'publish' : publish,
})
return values
@http.route(['/blogpost/post_discussion'], type='json', auth="public", website=True)
def post_discussion(self, blog_post_id, **post):
cr, uid, context = request.cr, request.uid, request.context
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
id = self._blog_post_message(user, blog_post_id, **post)
return self._get_discussion_detail([id], publish, **post)
@http.route('/blogpost/new', type='http', auth="public", website=True)
def blog_post_create(self, blog_id, **post):
cr, uid, context = request.cr, request.uid, request.context
new_blog_post_id = request.registry['blog.post'].create(cr, uid, {
'blog_id': blog_id,
'name': _("Blog Post Title"),
'subtitle': _("Subtitle"),
'content': '',
'website_published': False,
}, context=context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, new_blog_post_id, context=context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/duplicate', type='http', auth="public", website=True)
def blog_post_copy(self, blog_post_id, **post):
""" Duplicate a blog.
:param blog_post_id: id of the blog post currently browsed.
:return redirect to the new blog created
"""
cr, uid, context = request.cr, request.uid, request.context
create_context = dict(context, mail_create_nosubscribe=True)
nid = request.registry['blog.post'].copy(cr, uid, int(blog_post_id), {}, context=create_context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
post = request.registry['blog.post'].browse(cr, uid, nid, context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/get_discussion/', type='json', auth="public", website=True)
def discussion(self, post_id=0, path=None, count=False, **post):
cr, uid, context = request.cr, request.uid, request.context
mail_obj = request.registry.get('mail.message')
domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
#check current user belongs to website publisher group
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
if not publish:
domain.append(('website_published', '=', True))
ids = mail_obj.search(cr, SUPERUSER_ID, domain, count=count)
if count:
return ids
return self._get_discussion_detail(ids, publish, **post)
@http.route('/blogpost/get_discussions/', type='json', auth="public", website=True)
def discussions(self, post_id=0, paths=None, count=False, **post):
ret = []
for path in paths:
result = self.discussion(post_id=post_id, path=path, count=count, **post)
ret.append({"path": path, "val": result})
return ret
@http.route('/blogpost/change_background', type='json', auth="public", website=True)
def change_bg(self, post_id=0, image=None, **post):
if not post_id:
return False
return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], {'background_image': image}, request.context)
@http.route('/blog/get_user/', type='json', auth="public", website=True)
def get_user(self, **post):
return [not request.session.uid]
|
tchellomello/home-assistant
|
refs/heads/dev
|
tests/components/simulated/test_sensor.py
|
10
|
"""The tests for the simulated sensor."""
import unittest
from homeassistant.components.simulated.sensor import (
CONF_AMP,
CONF_FWHM,
CONF_MEAN,
CONF_PERIOD,
CONF_PHASE,
CONF_RELATIVE_TO_EPOCH,
CONF_SEED,
CONF_UNIT,
DEFAULT_AMP,
DEFAULT_FWHM,
DEFAULT_MEAN,
DEFAULT_NAME,
DEFAULT_PHASE,
DEFAULT_RELATIVE_TO_EPOCH,
DEFAULT_SEED,
)
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestSimulatedSensor(unittest.TestCase):
"""Test the simulated sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_default_config(self):
"""Test default config."""
config = {"sensor": {"platform": "simulated"}}
assert setup_component(self.hass, "sensor", config)
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get("sensor.simulated")
assert state.attributes.get(CONF_FRIENDLY_NAME) == DEFAULT_NAME
assert state.attributes.get(CONF_AMP) == DEFAULT_AMP
assert state.attributes.get(CONF_UNIT) is None
assert state.attributes.get(CONF_MEAN) == DEFAULT_MEAN
assert state.attributes.get(CONF_PERIOD) == 60.0
assert state.attributes.get(CONF_PHASE) == DEFAULT_PHASE
assert state.attributes.get(CONF_FWHM) == DEFAULT_FWHM
assert state.attributes.get(CONF_SEED) == DEFAULT_SEED
assert state.attributes.get(CONF_RELATIVE_TO_EPOCH) == DEFAULT_RELATIVE_TO_EPOCH
|
shapiroisme/datadollar
|
refs/heads/master-0.8
|
share/qt/make_spinner.py
|
4415
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
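# Each frame advances by an equal share of a full turn; the +0.5 samples the
# middle of each interval so the first and the wrap-around frame differ.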
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
akretion/odoo
|
refs/heads/12-patch-paging-100-in-o2m
|
addons/account_check_printing/models/account_payment.py
|
8
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.misc import formatLang, format_date
INV_LINES_PER_STUB = 9
class AccountRegisterPayments(models.TransientModel):
_inherit = "account.register.payments"
check_amount_in_words = fields.Char(string="Amount in Words")
check_manual_sequencing = fields.Boolean(related='journal_id.check_manual_sequencing', readonly=1)
# Note: a check_number == 0 means that it will be attributed when the check is printed
check_number = fields.Integer(string="Check Number", readonly=True, copy=False, default=0,
help="Number of the check corresponding to this payment. If your pre-printed check are not already numbered, "
"you can manage the numbering in the journal configuration page.")
@api.onchange('journal_id')
def _onchange_journal_id(self):
if hasattr(super(AccountRegisterPayments, self), '_onchange_journal_id'):
super(AccountRegisterPayments, self)._onchange_journal_id()
if self.journal_id.check_manual_sequencing:
self.check_number = self.journal_id.check_sequence_id.number_next_actual
@api.onchange('amount')
def _onchange_amount(self):
if hasattr(super(AccountRegisterPayments, self), '_onchange_amount'):
super(AccountRegisterPayments, self)._onchange_amount()
self.check_amount_in_words = self.currency_id.amount_to_text(self.amount) if self.currency_id else False
def _prepare_payment_vals(self, invoices):
res = super(AccountRegisterPayments, self)._prepare_payment_vals(invoices)
if self.payment_method_id == self.env.ref('account_check_printing.account_payment_method_check'):
res.update({
'check_amount_in_words': self.currency_id.amount_to_text(res['amount']) if self.multi else self.check_amount_in_words,
})
return res
class AccountPayment(models.Model):
_inherit = "account.payment"
check_amount_in_words = fields.Char(string="Amount in Words")
check_manual_sequencing = fields.Boolean(related='journal_id.check_manual_sequencing', readonly=1)
check_number = fields.Integer(string="Check Number", readonly=True, copy=False,
help="The selected journal is configured to print check numbers. If your pre-printed check paper already has numbers "
"or if the current numbering is wrong, you can change it in the journal configuration page.")
@api.onchange('amount','currency_id')
def _onchange_amount(self):
res = super(AccountPayment, self)._onchange_amount()
self.check_amount_in_words = self.currency_id.amount_to_text(self.amount) if self.currency_id else ''
return res
def _check_communication(self, payment_method_id, communication):
super(AccountPayment, self)._check_communication(payment_method_id, communication)
if payment_method_id == self.env.ref('account_check_printing.account_payment_method_check').id:
if not communication:
return
if len(communication) > 60:
raise ValidationError(_("A check memo cannot exceed 60 characters."))
@api.multi
def post(self):
res = super(AccountPayment, self).post()
payment_method_check = self.env.ref('account_check_printing.account_payment_method_check')
for payment in self.filtered(lambda p: p.payment_method_id == payment_method_check and p.check_manual_sequencing):
sequence = payment.journal_id.check_sequence_id
payment.check_number = sequence.next_by_id()
return res
@api.multi
def print_checks(self):
""" Check that the recordset is valid, set the payments state to sent and call print_checks() """
# Since this method can be called via a client_action_multi, we need to make sure the received records are what we expect
self = self.filtered(lambda r: r.payment_method_id.code == 'check_printing' and r.state != 'reconciled')
if len(self) == 0:
raise UserError(_("Payments to print as a checks must have 'Check' selected as payment method and "
"not have already been reconciled"))
if any(payment.journal_id != self[0].journal_id for payment in self):
raise UserError(_("In order to print multiple checks at once, they must belong to the same bank journal."))
if not self[0].journal_id.check_manual_sequencing:
# The wizard asks for the number printed on the first pre-printed check
            # so payments are attributed the number of the check they'll be printed on.
last_printed_check = self.search([
('journal_id', '=', self[0].journal_id.id),
('check_number', '!=', 0)], order="check_number desc", limit=1)
next_check_number = last_printed_check and last_printed_check.check_number + 1 or 1
return {
'name': _('Print Pre-numbered Checks'),
'type': 'ir.actions.act_window',
'res_model': 'print.prenumbered.checks',
'view_type': 'form',
'view_mode': 'form',
'target': 'new',
'context': {
'payment_ids': self.ids,
'default_next_check_number': next_check_number,
}
}
else:
self.filtered(lambda r: r.state == 'draft').post()
return self.do_print_checks()
@api.multi
def unmark_sent(self):
self.write({'state': 'posted'})
@api.multi
def do_print_checks(self):
""" This method is a hook for l10n_xx_check_printing modules to implement actual check printing capabilities """
raise UserError(_("You have to choose a check layout. For this, go in Apps, search for 'Checks layout' and install one."))
def set_check_amount_in_words(self):
for payment in self:
payment.check_amount_in_words = payment.currency_id.amount_to_text(payment.amount) if payment.currency_id else ''
#######################
#CHECK PRINTING METHODS
#######################
    def _check_fill_line(self, amount_str):
        # Pad the amount with '*' so the line cannot be altered after printing.
        return amount_str and (amount_str + ' ').ljust(200, '*') or ''
def _check_build_page_info(self, i, p):
multi_stub = self.company_id.account_check_printing_multi_stub
return {
'sequence_number': self.check_number if (self.journal_id.check_manual_sequencing and self.check_number != 0) else False,
'payment_date': format_date(self.env, self.payment_date),
'partner_id': self.partner_id,
'partner_name': self.partner_id.name,
'currency': self.currency_id,
'state': self.state,
'amount': formatLang(self.env, self.amount, currency_obj=self.currency_id) if i == 0 else 'VOID',
'amount_in_word': self._check_fill_line(self.check_amount_in_words) if i == 0 else 'VOID',
'memo': self.communication,
'stub_cropped': not multi_stub and len(self.invoice_ids) > INV_LINES_PER_STUB,
# If the payment does not reference an invoice, there is no stub line to display
'stub_lines': p,
}
def _check_get_pages(self):
""" Returns the data structure used by the template : a list of dicts containing what to print on pages.
"""
stub_pages = self._check_make_stub_pages() or [False]
pages = []
for i, p in enumerate(stub_pages):
pages.append(self._check_build_page_info(i, p))
return pages
def _check_make_stub_pages(self):
""" The stub is the summary of paid invoices. It may spill on several pages, in which case only the check on
first page is valid. This function returns a list of stub lines per page.
"""
if len(self.reconciled_invoice_ids) == 0:
return None
multi_stub = self.company_id.account_check_printing_multi_stub
invoices = self.reconciled_invoice_ids.sorted(key=lambda r: r.date_due)
debits = invoices.filtered(lambda r: r.type == 'in_invoice')
credits = invoices.filtered(lambda r: r.type == 'in_refund')
# Prepare the stub lines
if not credits:
stub_lines = [self._check_make_stub_line(inv) for inv in invoices]
else:
stub_lines = [{'header': True, 'name': "Bills"}]
stub_lines += [self._check_make_stub_line(inv) for inv in debits]
stub_lines += [{'header': True, 'name': "Refunds"}]
stub_lines += [self._check_make_stub_line(inv) for inv in credits]
# Crop the stub lines or split them on multiple pages
if not multi_stub:
# If we need to crop the stub, leave place for an ellipsis line
num_stub_lines = len(stub_lines) > INV_LINES_PER_STUB and INV_LINES_PER_STUB - 1 or INV_LINES_PER_STUB
stub_pages = [stub_lines[:num_stub_lines]]
else:
stub_pages = []
i = 0
while i < len(stub_lines):
# Make sure we don't start the credit section at the end of a page
if len(stub_lines) >= i + INV_LINES_PER_STUB and stub_lines[i + INV_LINES_PER_STUB - 1].get('header'):
                    num_stub_lines = INV_LINES_PER_STUB - 1
else:
num_stub_lines = INV_LINES_PER_STUB
stub_pages.append(stub_lines[i:i + num_stub_lines])
i += num_stub_lines
return stub_pages
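    # For example, with INV_LINES_PER_STUB = 9 and 20 stub lines in multi-stub
    # mode, the pages carry 9, 9 and 2 lines; one line fewer is taken whenever a
    # section header would otherwise land on the last line of a page.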
def _check_make_stub_line(self, invoice):
""" Return the dict used to display an invoice/refund in the stub
"""
# Find the account.partial.reconcile which are common to the invoice and the payment
if invoice.type in ['in_invoice', 'out_refund']:
invoice_sign = 1
invoice_payment_reconcile = invoice.move_id.line_ids.mapped('matched_debit_ids').filtered(lambda r: r.debit_move_id in self.move_line_ids)
else:
invoice_sign = -1
invoice_payment_reconcile = invoice.move_id.line_ids.mapped('matched_credit_ids').filtered(lambda r: r.credit_move_id in self.move_line_ids)
if self.currency_id != self.journal_id.company_id.currency_id:
amount_paid = abs(sum(invoice_payment_reconcile.mapped('amount_currency')))
else:
amount_paid = abs(sum(invoice_payment_reconcile.mapped('amount')))
amount_residual = invoice_sign * invoice.residual
return {
'due_date': format_date(self.env, invoice.date_due),
'number': invoice.reference and invoice.number + ' - ' + invoice.reference or invoice.number,
'amount_total': formatLang(self.env, invoice_sign * invoice.amount_total, currency_obj=invoice.currency_id),
'amount_residual': formatLang(self.env, amount_residual, currency_obj=invoice.currency_id) if amount_residual * 10**4 != 0 else '-',
'amount_paid': formatLang(self.env, invoice_sign * amount_paid, currency_obj=invoice.currency_id),
'currency': invoice.currency_id,
}
|
devs1991/test_edx_docmode
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/clean_cert_name.py
|
37
|
"""
A single-use management command that provides an interactive way to remove
erroneous certificate names.
"""
from collections import namedtuple
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
Result = namedtuple("Result", ["course_key", "cert_name_short", "cert_name_long", "should_clean"])
class Command(BaseCommand):
"""
A management command that provides an interactive way to remove erroneous cert_name_long and
cert_name_short course attributes across both the Split and Mongo modulestores.
"""
help = 'Allows manual clean-up of invalid cert_name_short and cert_name_long entries on CourseModules'
def _mongo_results(self):
"""
Return Result objects for any mongo-modulestore backend course that has
cert_name_short or cert_name_long set.
"""
# N.B. This code breaks many abstraction barriers. That's ok, because
# it's a one-time cleanup command.
# pylint: disable=protected-access
mongo_modulestore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
old_mongo_courses = mongo_modulestore.collection.find({
"_id.category": "course",
"$or": [
{"metadata.cert_name_short": {"$exists": 1}},
{"metadata.cert_name_long": {"$exists": 1}},
]
}, {
"_id": True,
"metadata.cert_name_short": True,
"metadata.cert_name_long": True,
})
return [
Result(
mongo_modulestore.make_course_key(
course['_id']['org'],
course['_id']['course'],
course['_id']['name'],
),
course['metadata'].get('cert_name_short'),
course['metadata'].get('cert_name_long'),
True
) for course in old_mongo_courses
]
def _split_results(self):
"""
Return Result objects for any split-modulestore backend course that has
cert_name_short or cert_name_long set.
"""
# N.B. This code breaks many abstraction barriers. That's ok, because
# it's a one-time cleanup command.
# pylint: disable=protected-access
split_modulestore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
active_version_collection = split_modulestore.db_connection.course_index
structure_collection = split_modulestore.db_connection.structures
branches = active_version_collection.aggregate([{
'$group': {
'_id': 1,
'draft': {'$push': '$versions.draft-branch'},
'published': {'$push': '$versions.published-branch'}
}
}, {
'$project': {
'_id': 1,
'branches': {'$setUnion': ['$draft', '$published']}
}
}])['result'][0]['branches']
structures = list(
structure_collection.find({
'_id': {'$in': branches},
'blocks': {'$elemMatch': {
'$and': [
{"block_type": "course"},
{'$or': [
{'fields.cert_name_long': {'$exists': True}},
{'fields.cert_name_short': {'$exists': True}}
]}
]
}}
}, {
'_id': True,
'blocks.fields.cert_name_long': True,
'blocks.fields.cert_name_short': True,
})
)
structure_map = {struct['_id']: struct for struct in structures}
structure_ids = [struct['_id'] for struct in structures]
split_mongo_courses = list(active_version_collection.find({
'$or': [
{"versions.draft-branch": {'$in': structure_ids}},
{"versions.published": {'$in': structure_ids}},
]
}, {
'org': True,
'course': True,
'run': True,
'versions': True,
}))
for course in split_mongo_courses:
draft = course['versions'].get('draft-branch')
if draft in structure_map:
draft_fields = structure_map[draft]['blocks'][0].get('fields', {})
else:
draft_fields = {}
published = course['versions'].get('published')
if published in structure_map:
published_fields = structure_map[published]['blocks'][0].get('fields', {})
else:
published_fields = {}
for fields in (draft_fields, published_fields):
for field in ('cert_name_short', 'cert_name_long'):
if field in fields:
course[field] = fields[field]
return [
Result(
split_modulestore.make_course_key(
course['org'],
course['course'],
course['run'],
),
course.get('cert_name_short'),
course.get('cert_name_long'),
True
) for course in split_mongo_courses
]
def _display(self, results):
"""
Render a list of Result objects as a nicely formatted table.
"""
headers = ["Course Key", "cert_name_short", "cert_name_short", "Should clean?"]
col_widths = [
max(len(unicode(result[col])) for result in results + [headers])
for col in range(len(results[0]))
]
id_format = "{{:>{}}} |".format(len(unicode(len(results))))
col_format = "| {{:>{}}} |"
self.stdout.write(id_format.format(""), ending='')
for header, width in zip(headers, col_widths):
self.stdout.write(col_format.format(width).format(header), ending='')
self.stdout.write('')
for idx, result in enumerate(results):
self.stdout.write(id_format.format(idx), ending='')
for col, width in zip(result, col_widths):
self.stdout.write(col_format.format(width).format(unicode(col)), ending='')
self.stdout.write("")
def _commit(self, results):
"""
For each Result in ``results``, if ``should_clean`` is True, remove cert_name_long
and cert_name_short from the course and save in the backing modulestore.
"""
for result in results:
if not result.should_clean:
continue
course = modulestore().get_course(result.course_key)
del course.cert_name_short
del course.cert_name_long
modulestore().update_item(course, ModuleStoreEnum.UserID.mgmt_command)
def handle(self, *args, **options):
results = self._mongo_results() + self._split_results()
self.stdout.write("Type the index of a row to toggle whether it will be cleaned, "
"'commit' to remove all cert_name_short and cert_name_long values "
"from any rows marked for cleaning, or 'quit' to quit.")
while True:
self._display(results)
command = raw_input("<index>|commit|quit: ").strip()
if command == 'quit':
return
elif command == 'commit':
self._commit(results)
return
elif command == '':
continue
else:
index = int(command)
results[index] = results[index]._replace(should_clean=not results[index].should_clean)
|
mwx1993/TACTIC
|
refs/heads/master
|
src/tactic/ui/input/process_group_select_wdg.py
|
5
|
###########################################################
#
# Copyright (c) 2009, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['ProcessGroupSelectWdg', 'LoginTableElementWdg']
from pyasm.search import Search, SearchKey, SearchException
from pyasm.biz import Pipeline
from pyasm.web import DivWdg
from pyasm.widget import BaseInputWdg, SelectWdg, SimpleTableElementWdg
class ProcessGroupSelectWdg(BaseInputWdg):
'''This widget builds a select widget '''
ARGS_KEYS = {
"label_attr": {
'description': "a list of | seperated login attributes to show in place of just the login attribute",
'type': 'TextWdg',
'order': 0,
'category': 'Display'
}
}
def init(my):
pass
def get_display(my):
my.labels_attr = my.get_option('label_attr')
if my.labels_attr:
my.labels_attr = my.labels_attr.split('|')
from tactic.ui.panel import EditWdg
if hasattr(my, 'parent_wdg') and isinstance(my.get_parent_wdg(), EditWdg):
sobject = my.get_current_sobject()
parent = sobject.get_parent()
group = None
pipeline_code = None
if parent:
pipeline_code = parent.get_value('pipeline_code')
pipeline = Pipeline.get_by_code(pipeline_code)
labels_expr = None
if pipeline:
attrs = pipeline.get_process_attrs(sobject.get_value('process'))
group = attrs.get('%s_login_group'%my.get_name())
if group:
values_expr = "@GET(sthpw/login_group['login_group', '%s'].sthpw/login_in_group.sthpw/login.login)"%group
if my.labels_attr:
labels_expr = ["@GET(sthpw/login_group['login_group', '%s'].sthpw/login_in_group.sthpw/login.%s)"%(group, x.strip()) for x in my.labels_attr]
labels_expr = ' +   + '.join(labels_expr)
else:
values_expr = "@GET(sthpw/login.login)"
if my.labels_attr:
labels_expr = ["@GET(sthpw/login.%s)"%(x.strip()) for x in my.labels_attr]
labels_expr = ' +   + '.join(labels_expr)
select = SelectWdg(my.get_input_name())
select.add_empty_option("-- Select a User --")
"""
values = []
labels = []
for user in group_users:
values.append(user)
labels.append(' %s'%user)
"""
select.set_option('values_expr', values_expr)
if labels_expr:
select.set_option('labels_expr', labels_expr)
current_value = sobject.get_value(my.get_name())
if current_value:
select.set_value(current_value)
return select
#all_users = Search.eval("@GET(sthpw/login.login)")
all_users = Search.eval("@SOBJECT(sthpw/login)")
all_users_label = []
# don't use expression here since it's not as db-efficient as retrieving the sobjects
"""
if my.labels_attr:
labels_expr = ["@GET(sthpw/login.login.%s)"%x.strip() for x in my.labels_attr]
"""
'''
groups = Search.eval("@SOBJECT(sthpw/login_group)")
group_dict = {}
for group in groups:
        group_users = Search.eval("@GET(sthpw/login_group['login_group', '%s'].sthpw/login_in_group.sthpw/login.login)" % group.get_value('login_group'))
group_dict[group.get_value('login_group')] = group_users
'''
logins_dict = {}
for user in all_users:
user_name = user.get_value('login')
logins_dict[user_name] = {}
group_dict = {}
items = Search.eval("@SOBJECT(sthpw/login_in_group)")
for item in items:
item_login = item.get_value("login")
if logins_dict.get(item_login) == None:
continue
item_group = item.get_value("login_group")
group_list = group_dict.get(item_group)
if group_list == None:
group_list = []
group_dict[item_group] = group_list
group_list.append(item_login)
top = DivWdg()
top.add_class("spt_input_top")
# HACK! This isn't very well constructed
### Tore: Not my code! Copied from ProcessContextInputWdg. Seems to work though.
top.add_attr("spt_cbjs_get_input_key", "return cell_to_edit.getAttribute('spt_pipeline_code');")
# Adding an "all users" select option in case it can't find a useful select widget.
div = DivWdg()
div.add_class("spt_input_option")
#div.add_attr("spt_input_key", '__all__') #Not needed, since it defaults to the first one anyway.
select = SelectWdg(my.get_name())
select.add_empty_option("-- Select a User --")
values = []
labels = []
labels_dict = {}
for user in all_users:
user_name = user.get_value('login')
values.append(user_name)
label = user_name
if my.labels_attr:
user_labels = [user.get_value(x) for x in my.labels_attr]
label = ' '.join(user_labels)
labels_dict[user_name] = label
labels.append('%s'%label)
#print "select ", user_name
# -- NOTE: leaving this commented out code here for reference. Not sure why this is the case but when
# -- this click behavior is used instead of a 'change' behavior that forces a blur on select,
# -- click selection only works for this widget in Firefox and does NOT work in IE
#
# select.add_behavior( { 'type': 'click',
# 'cbjs_action': 'spt.dg_table.select_wdg_clicked( evt, bvr.src_el );' } )
# -- Replace previous 'click' behavior with a 'change' behavior to force blur() of select element ...
# -- this works for both Firefox and IE
#
select.add_behavior( { 'type': 'change',
'cbjs_action': 'bvr.src_el.blur();' } )
#behavior = {
# 'type': 'keyboard',
# 'kbd_handler_name': 'DgTableSelectWidgetKeyInput',
#}
#select.add_behavior( behavior )
select.set_option("values", values)
select.set_option("labels", labels)
div.add(select)
top.add(div)
#Building each of the select widgets per group here.
for group in group_dict.keys():
div = DivWdg()
div.add_class("spt_input_option")
div.add_attr("spt_input_key", group)
select = SelectWdg(my.get_name())
select.add_empty_option("-- Select a User --")
values = ['']
labels = ['<< %s >>'%group]
for user in group_dict[group]:
values.append(user)
label = labels_dict.get(user)
labels.append(' %s'%label)
select.add_behavior( { 'type': 'click',
'cbjs_action': 'spt.dg_table.select_wdg_clicked( evt, bvr.src_el );' } )
#behavior = {
# 'type': 'keyboard',
# 'kbd_handler_name': 'DgTableSelectWidgetKeyInput',
#}
#select.add_behavior( behavior )
select.set_option("values", values)
select.set_option("labels", labels)
div.add(select)
top.add(div)
return top
class LoginTableElementWdg(SimpleTableElementWdg):
    '''This ElementWdg supplies the login group that ProcessGroupSelectWdg
    should look up for the given task.'''
def handle_td(my, td):
super(LoginTableElementWdg, my).handle_td(td)
task = my.get_current_sobject()
if task:
search_type = task.get_value('search_type')
search_id = task.get_value('search_id')
if not search_type or not search_id:
return
search_key = SearchKey.build_search_key(search_type, search_id, column='id')
from pyasm.common import SObjectSecurityException
try:
parent = Search.get_by_search_key(search_key)
pipeline = Pipeline.get_by_sobject(parent)
if pipeline:
attrs = pipeline.get_process_attrs(task.get_value('process'))
td.add_attr('spt_pipeline_code', attrs.get('%s_login_group'%my.get_name()))
except SObjectSecurityException, e:
pass
except SearchException, e:
if e.__str__().find('not registered') != -1:
pass
elif e.__str__().find('does not exist for database') != -1:
pass
elif e.__str__().find('Cannot find project') != -1:
pass
else:
raise
|
janusnic/dj-21v
|
refs/heads/master
|
unit_12/mysite/userprofiles/mixins.py
|
9
|
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
class LoginRequiredMixin(object):
"""Ensures that the user is authenticated in order to access the view.
http://djangosnippets.org/snippets/2442/"""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
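# Illustrative usage; the view below is hypothetical and not part of this app:
#
#   from django.views.generic import TemplateView
#
#   class ProfileView(LoginRequiredMixin, TemplateView):
#       template_name = 'userprofiles/profile.html'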
|
mehdidc/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/sparse_pca.py
|
26
|
"""Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import numpy as np
from ..utils import check_random_state, check_array
from ..utils.validation import check_is_fitted
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
"""Sparse Principal Components Analysis (SparsePCA)
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
Number of sparse atoms to extract.
alpha : float,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run.
U_init : array of shape (n_samples, n_components),
Initial values for the loadings for warm restart scenarios.
V_init : array of shape (n_components, n_features),
Initial values for the components for warm restart scenarios.
verbose :
Degree of verbosity of the printed output.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
Sparse components extracted from the data.
error_ : array
Vector of errors at each iteration.
n_iter_ : int
Number of iterations run.
See also
--------
PCA
MiniBatchSparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
V_init=None, verbose=False, random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.max_iter = max_iter
self.tol = tol
self.method = method
self.n_jobs = n_jobs
self.U_init = U_init
self.V_init = V_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
code_init = self.V_init.T if self.V_init is not None else None
dict_init = self.U_init.T if self.U_init is not None else None
Vt, _, E, self.n_iter_ = dict_learning(X.T, n_components, self.alpha,
tol=self.tol,
max_iter=self.max_iter,
method=self.method,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=random_state,
code_init=code_init,
dict_init=dict_init,
return_n_iter=True
)
self.components_ = Vt.T
self.error_ = E
return self
def transform(self, X, ridge_alpha=None):
"""Least Squares projection of the data onto the sparse components.
To avoid instability issues in case the system is under-determined,
regularization can be applied (Ridge regression) via the
`ridge_alpha` parameter.
        Note that the sparse components are not orthogonal, unlike in PCA,
        hence one cannot use a simple linear projection.
Parameters
----------
X: array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
ridge_alpha: float, default: 0.01
Amount of ridge shrinkage to apply in order to improve
conditioning.
Returns
-------
        X_new : array, shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X)
ridge_alpha = self.ridge_alpha if ridge_alpha is None else ridge_alpha
U = ridge_regression(self.components_.T, X.T, ridge_alpha,
solver='cholesky')
s = np.sqrt((U ** 2).sum(axis=0))
s[s == 0] = 1
U /= s
return U
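# A minimal usage sketch (data and shapes below are illustrative assumptions):
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(20, 10)
#   spca = SparsePCA(n_components=3, random_state=0)
#   codes = spca.fit(X).transform(X)   # codes.shape == (20, 3)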
class MiniBatchSparsePCA(SparsePCA):
"""Mini-batch Sparse Principal Components Analysis
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Parameters
----------
n_components : int,
number of sparse atoms to extract
alpha : int,
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float,
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
n_iter : int,
number of iterations to perform for each mini batch
callback : callable,
callable that gets invoked every five iterations
batch_size : int,
the number of features to take in each mini batch
verbose :
degree of output the procedure will print
shuffle : boolean,
whether to shuffle the data before splitting it in batches
n_jobs : int,
number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
Sparse components extracted from the data.
error_ : array
Vector of errors at each iteration.
n_iter_ : int
Number of iterations run.
See also
--------
PCA
SparsePCA
DictionaryLearning
"""
def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
n_iter=100, callback=None, batch_size=3, verbose=False,
shuffle=True, n_jobs=1, method='lars', random_state=None):
self.n_components = n_components
self.alpha = alpha
self.ridge_alpha = ridge_alpha
self.n_iter = n_iter
self.callback = callback
self.batch_size = batch_size
self.verbose = verbose
self.shuffle = shuffle
self.n_jobs = n_jobs
self.method = method
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
Vt, _, self.n_iter_ = dict_learning_online(
X.T, n_components, alpha=self.alpha,
n_iter=self.n_iter, return_code=True,
dict_init=None, verbose=self.verbose,
callback=self.callback,
batch_size=self.batch_size,
shuffle=self.shuffle,
n_jobs=self.n_jobs, method=self.method,
random_state=random_state,
return_n_iter=True
)
self.components_ = Vt.T
return self
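# MiniBatchSparsePCA follows the same fit/transform pattern while trading some
# accuracy for speed on larger data; a sketch under the same assumptions as above:
#
#   mbspca = MiniBatchSparsePCA(n_components=3, batch_size=10, random_state=0)
#   codes = mbspca.fit(X).transform(X)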
|
lufeewu/flasky
|
refs/heads/master
|
manage.py
|
76
|
#!/usr/bin/env python
import os
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
Permission=Permission, Post=Post, Comment=Comment)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
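        # Re-execute this script with FLASK_COVERAGE set so that coverage
        # tracking starts before the application package is imported.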
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade
from app.models import Role, User
# migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
# create self-follows for all users
User.add_self_follows()
if __name__ == '__main__':
manager.run()
|
dziadu/gitbrowser
|
refs/heads/master
|
tests/__init__.py
|
803
|
# -*- coding: utf-8 -*-
|
barnone/EigenD
|
refs/heads/2.0
|
tools/packages/SCons/Tool/f77.py
|
2
|
"""engine.SCons.Tool.f77
Tool-specific initialization for the generic Posix f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f77.py 4577 2009/12/27 19:43:56 scons"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f77_to_env
compilers = ['f77']
def generate(env):
add_all_to_env(env)
add_f77_to_env(env)
fcomp = env.Detect(compilers) or 'f77'
env['F77'] = fcomp
env['SHF77'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
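# Illustrative use through the normal selection mechanism (a sketch; assumes a
# Fortran 77 compiler is available on PATH):
#
#   import SCons.Environment
#   env = SCons.Environment.Environment(tools=['default', 'f77'])
#   # env['F77'] now names whichever compiler this module detected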
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
FePhyFoFum/PyPHLAWD
|
refs/heads/master
|
src/get_ncbi_ids_for_names.py
|
1
|
import os
import sys
import sqlite3
def get_taxid_for_name(cursor, name):
cursor.execute("select ncbi_id from taxonomy where name = '"+name+"';")
ttax_id = None
for j in cursor:
ttax_id = str(j[0])
return ttax_id
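# Illustrative call; the database filename is a hypothetical placeholder:
#
#   conn = sqlite3.connect("taxonomy.db")
#   print(get_taxid_for_name(conn.cursor(), "Homo sapiens"))  # -> '9606'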
if __name__ == "__main__":
if len(sys.argv) != 3:
print("python "+sys.argv[0]+" infile database")
sys.exit(0)
inf = open(sys.argv[1],"r")
dbname = sys.argv[2]
conn = sqlite3.connect(dbname)
c = conn.cursor()
outfile = open("ncbi_notfound.ids","w")
for i in inf:
nm = i.strip()
if len(nm) < 2:
continue
tt = get_taxid_for_name(c,nm)
if tt is not None:
print(tt)
else:
outfile.write(nm+"\n")
outfile.close()
inf.close()
c.close()
|