code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
parse_duration,
parse_iso8601,
)
class TwitchBaseIE(InfoExtractor):
    """Base class for Twitch extractors: shared endpoint URLs, API error
    handling, token-aware JSON downloads and the username/password login flow.
    """

    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'http://usher.twitch.tv'
    _LOGIN_URL = 'https://secure.twitch.tv/login'
    _LOGIN_POST_URL = 'https://passport.twitch.tv/authorize'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
        # API errors arrive as JSON dicts carrying 'error'/'message' keys;
        # non-dict payloads are passed through untouched.
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        """Download JSON from the Twitch API, forwarding the api_token
        cookie (set after login) as the Twitch-Api-Token header, and raise
        ExtractorError if the payload reports an API error."""
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'api_token':
                headers['Twitch-Api-Token'] = cookie.value
        request = compat_urllib_request.Request(url, headers=headers)
        response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        # No-op unless credentials were supplied (CLI options or .netrc).
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Carry over the hidden form fields (CSRF token etc.) from the page.
        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'login': username.encode('utf-8'),
            'password': password.encode('utf-8'),
        })

        request = compat_urllib_request.Request(
            self._LOGIN_POST_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        error_message = self._search_regex(
            r'<div[^>]+class="subwindow_notice"[^>]*>([^<]+)</div>',
            response, 'error message', default=None)
        if error_message:
            raise ExtractorError(
                'Unable to login. Twitch said: %s' % error_message, expected=True)

        if '>Reset your password<' in response:
            self.report_warning('Twitch asks you to reset your password, go to https://secure.twitch.tv/reset/submit')

    def _prefer_source(self, formats):
        # Boost the 'Source' (original-quality) format above the transcodes,
        # then apply the standard format ordering.
        try:
            source = next(f for f in formats if f['format_id'] == 'Source')
            source['preference'] = 10
        except StopIteration:
            pass  # No Source stream present
        self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
    """Base for single-item extractors (video/chapter/vod).

    Subclasses define _ITEM_TYPE (human-readable) and _ITEM_SHORTCUT (the
    one-letter API prefix used in item ids and API paths).
    """

    def _download_info(self, item, item_id):
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

    def _extract_media(self, item_id):
        """Build a playlist of the item's parts, one entry per fragment
        position, each entry carrying one format per available quality."""
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        # 'chunks' maps quality name -> list of fragments; zipping the value
        # lists groups the n-th fragment of every quality together.
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    # 'live' (source) quality is ranked above the transcodes
                    'quality': 1 if format_id == 'live' else 0,
                }
                # Quality names like '720p' encode the height
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        # Map the kraken API response onto youtube-dl's info-dict fields.
        return {
            'id': info['_id'],
            'title': info['title'],
            'description': info['description'],
            'duration': info['length'],
            'thumbnail': info['preview'],
            'uploader': info['channel']['display_name'],
            'uploader_id': info['channel']['name'],
            'timestamp': parse_iso8601(info['recorded_at']),
            'view_count': info['views'],
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
    """Extractor for legacy Twitch broadcast URLs (/<channel>/b/<id>)."""
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
    }
class TwitchChapterIE(TwitchItemBaseIE):
    """Extractor for Twitch chapter/highlight URLs (/<channel>/c/<id>)."""
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'

    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]
class TwitchVodIE(TwitchItemBaseIE):
    """Extractor for Twitch VOD URLs (/<channel>/v/<id>), serving HLS
    (m3u8) formats via the usher service."""
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'

    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
        'info_dict': {
            'id': 'v6528877',
            'ext': 'mp4',
            'title': 'LCK Summer Split - Week 6 Day 1',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 17208,
            'timestamp': 1435131709,
            'upload_date': '20150624',
            'uploader': 'Riot Games',
            'uploader_id': 'riotgames',
            'view_count': int,
            'start_time': 310,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        # The usher m3u8 endpoint requires a per-VOD auth token + signature.
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?nauth=%s&nauthsig=%s&allow_source=true'
            % (self._USHER_BASE, item_id, access_token['token'], access_token['sig']),
            item_id, 'mp4')
        self._prefer_source(formats)
        info['formats'] = formats

        # Honour a ?t=5m10s deep-link by exposing it as start_time (seconds).
        parsed_url = compat_urllib_parse_urlparse(url)
        query = compat_parse_qs(parsed_url.query)
        if 't' in query:
            info['start_time'] = parse_duration(query['t'][0])

        return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
    """Base for channel playlist extractors: pages through the channel's
    video list until an empty page is returned."""

    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        # Page through until the API returns an empty page.
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id, 'Downloading %s videos JSON page %d' % (self._PLAYLIST_TYPE, counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            entries.extend(page_entries)
            offset += limit
        # NOTE: set() deduplicates URLs but does not preserve page order.
        return self.playlist_result(
            [self.url_result(entry) for entry in set(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        # Default page parser; TwitchBookmarksIE overrides this.
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
    """Extractor for a channel's profile page (all highlights/videos)."""
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'

    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    """Extractor for a channel's past-broadcasts listing; reuses the base
    playlist URL with broadcasts=true appended."""
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'

    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }
class TwitchBookmarksIE(TwitchPlaylistBaseIE):
    """Extractor for a user's bookmarks page; pages come from the bookmark
    API rather than the kraken videos listing."""
    IE_NAME = 'twitch:bookmarks'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/bookmarks/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = '%s/api/bookmark/?user=%%s&offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PLAYLIST_TYPE = 'bookmarks'

    _TEST = {
        'url': 'http://www.twitch.tv/ognos/profile/bookmarks',
        'info_dict': {
            'id': 'ognos',
            'title': 'Ognos',
        },
        'playlist_mincount': 3,
    }

    def _extract_playlist_page(self, response):
        # Keep only bookmarks that actually reference a video.
        return [
            bookmark['video']['url']
            for bookmark in response.get('bookmarks', [])
            if bookmark.get('video')
        ]
class TwitchStreamIE(TwitchBaseIE):
    """Extractor for live channel URLs (http://www.twitch.tv/<channel>).

    Falls back to profile extraction when the channel is offline.
    """
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE

    _TESTS = [{
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.twitch.tv/miracle_doto#profile-0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)

        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')

        # Fallback on profile extraction if stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)

        # Channel name may be typed if different case than the original channel name
        # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
        # an invalid m3u8 URL. Working around by use of original channel name from stream
        # JSON and fallback to lowercase if it's not available.
        channel_id = stream.get('channel', {}).get('name') or channel_id.lower()

        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')

        query = {
            'allow_source': 'true',
            # random value, presumably a cache-buster — matches what the
            # web player sends
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }

        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
            channel_id, 'mp4')
        self._prefer_source(formats)

        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))

        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')

        # Only preview keys ending in '<width>x<height>.jpg' are kept; other
        # keys (e.g. 'template') carry no concrete dimensions.
        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })

        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }
|
vimagick/youtube-dl
|
youtube_dl/extractor/twitch.py
|
Python
|
unlicense
| 14,731
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.v2.dataset.common
import unittest
import tempfile
import glob
class TestCommon(unittest.TestCase):
    """Unit tests for the paddle.v2.dataset.common helpers (md5file,
    download, split, cluster_files_reader, convert)."""

    def test_md5file(self):
        # md5("Hello\n") is a known constant.
        _, temp_path = tempfile.mkstemp()
        with open(temp_path, 'w') as f:
            f.write("Hello\n")
        self.assertEqual('09f7e02f1290be211da707a266f153b3',
                         paddle.v2.dataset.common.md5file(temp_path))

    def test_download(self):
        yi_avatar = 'https://avatars0.githubusercontent.com/u/1548775?v=3&s=460'
        self.assertEqual(
            paddle.v2.dataset.common.DATA_HOME + '/test/1548775?v=3&s=460',
            paddle.v2.dataset.common.download(
                yi_avatar, 'test', 'f75287202d6622414c706c36c16f8e0d'))

    def test_split(self):
        def test_reader():
            def reader():
                for x in xrange(10):
                    yield x

            return reader

        # BUG FIX: mkstemp() creates a *file*, but the suffix pattern below
        # needs a directory to write the shards into — use mkdtemp().
        temp_dir = tempfile.mkdtemp()
        paddle.v2.dataset.common.split(
            test_reader(), 4, suffix=temp_dir + '/test-%05d.pickle')
        # BUG FIX: globbing for a literal '%05d' matches nothing; use a
        # wildcard to pick up the generated shard files.
        files = glob.glob(temp_dir + '/test-*.pickle')
        # 10 records, 4 per shard -> 3 files.
        self.assertEqual(len(files), 3)

    def test_cluster_file_reader(self):
        # BUG FIX: need a directory for the '/%05d.test' files, not the
        # temp *file* returned by mkstemp().
        temp_dir = tempfile.mkdtemp()
        for x in xrange(5):
            # BUG FIX: open() defaulted to read mode; writing needs 'w'.
            with open(temp_dir + '/%05d.test' % x, 'w') as f:
                f.write('%d\n' % x)
        reader = paddle.v2.dataset.common.cluster_files_reader(
            temp_dir + '/*.test', 5, 0)
        for idx, e in enumerate(reader()):
            self.assertEqual(e, str("0"))

    def test_convert(self):
        # BUG FIX: recordio was used below but never imported; import it
        # locally so the other tests do not require the package.
        import recordio

        record_num = 10
        num_shards = 4

        def test_reader():
            def reader():
                for x in xrange(record_num):
                    yield x

            return reader

        path = tempfile.mkdtemp()
        paddle.v2.dataset.common.convert(path,
                                         test_reader(), num_shards,
                                         'random_images')
        files = glob.glob(path + '/random_images-*')
        self.assertEqual(len(files), num_shards)

        # Read every shard back and verify the total record count.
        recs = []
        for i in range(0, num_shards):
            n = "%s/random_images-%05d-of-%05d" % (path, i, num_shards - 1)
            r = recordio.reader(n)
            while True:
                d = r.read()
                if d is None:
                    break
                recs.append(d)
        recs.sort()
        # BUG FIX: 'total' was an undefined name; assert on len(recs).
        self.assertEqual(len(recs), record_num)
|
QiJune/Paddle
|
python/paddle/v2/dataset/tests/common_test.py
|
Python
|
apache-2.0
| 3,112
|
from waterbutler.server.api.v0 import zip
from waterbutler.server.api.v0 import copy
from waterbutler.server.api.v0 import crud
from waterbutler.server.api.v0 import move
from waterbutler.server.api.v0 import metadata
from waterbutler.server.api.v0 import revisions

# URL prefix for all v0 routes (none — routes are mounted at the root).
PREFIX = ''

# (url pattern, handler class) routing table for the v0 API.
HANDLERS = [
    (r'/ops/copy', copy.CopyHandler),
    (r'/ops/move', move.MoveHandler),
    (r'/zip', zip.ZipHandler),
    (r'/file', crud.CRUDHandler),
    (r'/data', metadata.MetadataHandler),
    (r'/revisions', revisions.RevisionHandler),
]
|
Johnetordoff/waterbutler
|
waterbutler/server/api/v0/__init__.py
|
Python
|
apache-2.0
| 526
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from testtools import matchers
from keystone import exception
class PolicyAssociationTests(object):
    """Mixin of endpoint-policy association tests.

    Expects the host test case to provide self.endpoint_policy_api,
    self.policy_api and self.catalog_api fixtures.
    """

    def _assert_correct_policy(self, endpoint, policy):
        # The effective policy for an endpoint must be the expected one.
        ref = (
            self.endpoint_policy_api.get_policy_for_endpoint(endpoint['id']))
        self.assertEqual(policy['id'], ref['id'])

    def _assert_correct_endpoints(self, policy, endpoint_list):
        # The policy must map to exactly the given endpoints (any order).
        endpoint_id_list = [ep['id'] for ep in endpoint_list]
        endpoints = (
            self.endpoint_policy_api.list_endpoints_for_policy(policy['id']))
        self.assertThat(endpoints, matchers.HasLength(len(endpoint_list)))
        for endpoint in endpoints:
            self.assertIn(endpoint['id'], endpoint_id_list)

    def load_sample_data(self):
        """Create sample data to test policy associations.

        The following data is created:

        - 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top)
        - 3 services
        - 6 endpoints, 2 in each region, with a mixture of services:
          0 - region 0, Service 0
          1 - region 0, Service 1
          2 - region 1, Service 1
          3 - region 1, Service 2
          4 - region 2, Service 2
          5 - region 2, Service 0

        """

        def new_endpoint(region_id, service_id):
            # Helper: create one endpoint and record it in self.endpoint.
            endpoint = {'id': uuid.uuid4().hex, 'interface': 'test',
                        'region_id': region_id, 'service_id': service_id,
                        'url': '/url'}
            self.endpoint.append(self.catalog_api.create_endpoint(
                endpoint['id'], endpoint))

        self.policy = []
        self.endpoint = []
        self.service = []
        self.region = []
        for i in range(3):
            policy = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
                      'blob': {'data': uuid.uuid4().hex}}
            self.policy.append(self.policy_api.create_policy(policy['id'],
                                                             policy))
            service = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
            self.service.append(self.catalog_api.create_service(service['id'],
                                                                service))
            region = {'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex}
            # Link the 3 regions together as a hierarchy, [0] at the top
            if i != 0:
                region['parent_region_id'] = self.region[i - 1]['id']
            self.region.append(self.catalog_api.create_region(region))

        new_endpoint(self.region[0]['id'], self.service[0]['id'])
        new_endpoint(self.region[0]['id'], self.service[1]['id'])
        new_endpoint(self.region[1]['id'], self.service[1]['id'])
        new_endpoint(self.region[1]['id'], self.service[2]['id'])
        new_endpoint(self.region[2]['id'], self.service[2]['id'])
        new_endpoint(self.region[2]['id'], self.service[0]['id'])

    def test_policy_to_endpoint_association_crud(self):
        # Create, check, delete — then verify the check now raises NotFound.
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self.endpoint_policy_api.check_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self.endpoint_policy_api.delete_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          endpoint_id=self.endpoint[0]['id'])

    def test_overwriting_policy_to_endpoint_association(self):
        # A second association for the same endpoint replaces the first.
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          endpoint_id=self.endpoint[0]['id'])
        self.endpoint_policy_api.check_policy_association(
            self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'])

    def test_invalid_policy_to_endpoint_association(self):
        # Associations must target exactly one of: endpoint, service,
        # or region+service — anything else is invalid.
        self.assertRaises(exception.InvalidPolicyAssociation,
                          self.endpoint_policy_api.create_policy_association,
                          self.policy[0]['id'])
        self.assertRaises(exception.InvalidPolicyAssociation,
                          self.endpoint_policy_api.create_policy_association,
                          self.policy[0]['id'],
                          endpoint_id=self.endpoint[0]['id'],
                          region_id=self.region[0]['id'])
        self.assertRaises(exception.InvalidPolicyAssociation,
                          self.endpoint_policy_api.create_policy_association,
                          self.policy[0]['id'],
                          endpoint_id=self.endpoint[0]['id'],
                          service_id=self.service[0]['id'])
        self.assertRaises(exception.InvalidPolicyAssociation,
                          self.endpoint_policy_api.create_policy_association,
                          self.policy[0]['id'],
                          region_id=self.region[0]['id'])

    def test_policy_to_explicit_endpoint_association(self):
        # Associate policy 0 with endpoint 0
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self._assert_correct_policy(self.endpoint[0], self.policy[0])
        self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.get_policy_for_endpoint,
                          uuid.uuid4().hex)

    def test_policy_to_service_association(self):
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], service_id=self.service[0]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[1]['id'], service_id=self.service[1]['id'])

        # Endpoints 0 and 5 are part of service 0
        self._assert_correct_policy(self.endpoint[0], self.policy[0])
        self._assert_correct_policy(self.endpoint[5], self.policy[0])
        self._assert_correct_endpoints(
            self.policy[0], [self.endpoint[0], self.endpoint[5]])

        # Endpoints 1 and 2 are part of service 1
        self._assert_correct_policy(self.endpoint[1], self.policy[1])
        self._assert_correct_policy(self.endpoint[2], self.policy[1])
        self._assert_correct_endpoints(
            self.policy[1], [self.endpoint[1], self.endpoint[2]])

    def test_policy_to_region_and_service_association(self):
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], service_id=self.service[0]['id'],
            region_id=self.region[0]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[1]['id'], service_id=self.service[1]['id'],
            region_id=self.region[1]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[2]['id'], service_id=self.service[2]['id'],
            region_id=self.region[2]['id'])

        # Endpoint 0 is in region 0 with service 0, so should get policy 0
        self._assert_correct_policy(self.endpoint[0], self.policy[0])

        # Endpoint 5 is in Region 2 with service 0, so should also get
        # policy 0 by searching up the tree to Region 0
        self._assert_correct_policy(self.endpoint[5], self.policy[0])

        # Looking the other way round, policy 2 should only be in use by
        # endpoint 4, since that's the only endpoint in region 2 with the
        # correct service
        self._assert_correct_endpoints(
            self.policy[2], [self.endpoint[4]])

        # Policy 1 should only be in use by endpoint 2, since that's the only
        # endpoint in region 1 (and region 2 below it) with the correct service
        self._assert_correct_endpoints(
            self.policy[1], [self.endpoint[2]])

        # Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is
        # of the correct service and in region 2 below it)
        self._assert_correct_endpoints(
            self.policy[0], [self.endpoint[0], self.endpoint[5]])

    def test_delete_association_by_entity(self):
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'])
        self.endpoint_policy_api.delete_association_by_endpoint(
            self.endpoint[0]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          endpoint_id=self.endpoint[0]['id'])
        # Make sure deleting it again is silent - since this method is used
        # in response to notifications by the controller.
        self.endpoint_policy_api.delete_association_by_endpoint(
            self.endpoint[0]['id'])

        # Now try with service - ensure both combined region & service
        # associations and explicit service ones are removed
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], service_id=self.service[0]['id'],
            region_id=self.region[0]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[1]['id'], service_id=self.service[0]['id'],
            region_id=self.region[1]['id'])
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], service_id=self.service[0]['id'])

        self.endpoint_policy_api.delete_association_by_service(
            self.service[0]['id'])

        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          service_id=self.service[0]['id'],
                          region_id=self.region[0]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[1]['id'],
                          service_id=self.service[0]['id'],
                          region_id=self.region[1]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          service_id=self.service[0]['id'])

        # Finally, check delete by region
        self.endpoint_policy_api.create_policy_association(
            self.policy[0]['id'], service_id=self.service[0]['id'],
            region_id=self.region[0]['id'])

        self.endpoint_policy_api.delete_association_by_region(
            self.region[0]['id'])

        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          service_id=self.service[0]['id'],
                          region_id=self.region[0]['id'])
        self.assertRaises(exception.NotFound,
                          self.endpoint_policy_api.check_policy_association,
                          self.policy[0]['id'],
                          service_id=self.service[0]['id'])
|
promptworks/keystone
|
keystone/tests/unit/test_backend_endpoint_policy.py
|
Python
|
apache-2.0
| 12,175
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public Python API of TensorFlow Debugger (tfdbg).
See the [TFDBG](https://www.tensorflow.org/guide/debugger) guide.
@@add_debug_tensor_watch
@@watch_graph
@@watch_graph_with_denylists
@@DebugTensorDatum
@@DebugDumpDir
@@load_tensor_from_event
@@load_tensor_from_event_file
@@has_inf_or_nan
@@DumpingDebugHook
@@DumpingDebugWrapperSession
@@GrpcDebugHook
@@GrpcDebugWrapperSession
@@LocalCLIDebugHook
@@LocalCLIDebugWrapperSession
@@TensorBoardDebugHook
@@TensorBoardDebugWrapperSession
@@WatchOptions
@@reconstruct_non_debug_graph_def
@@GradientsDebugger
@@clear_gradient_debuggers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.debug.lib.debug_data import DebugDumpDir
from tensorflow.python.debug.lib.debug_data import DebugTensorDatum
from tensorflow.python.debug.lib.debug_data import has_inf_or_nan
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event
from tensorflow.python.debug.lib.debug_data import load_tensor_from_event_file
from tensorflow.python.debug.lib.debug_gradients import GradientsDebugger
from tensorflow.python.debug.lib.debug_graphs import reconstruct_non_debug_graph_def
from tensorflow.python.debug.lib.debug_utils import add_debug_tensor_watch
from tensorflow.python.debug.lib.debug_utils import watch_graph
from tensorflow.python.debug.lib.debug_utils import watch_graph_with_denylists
from tensorflow.python.debug.wrappers.dumping_wrapper import DumpingDebugWrapperSession
from tensorflow.python.debug.wrappers.framework import WatchOptions
from tensorflow.python.debug.wrappers.grpc_wrapper import GrpcDebugWrapperSession
from tensorflow.python.debug.wrappers.grpc_wrapper import TensorBoardDebugWrapperSession
from tensorflow.python.debug.wrappers.hooks import DumpingDebugHook
from tensorflow.python.debug.wrappers.hooks import GrpcDebugHook
from tensorflow.python.debug.wrappers.hooks import LocalCLIDebugHook
from tensorflow.python.debug.wrappers.hooks import TensorBoardDebugHook
from tensorflow.python.debug.wrappers.local_cli_wrapper import LocalCLIDebugWrapperSession
from tensorflow.python.util import all_util as _all_util
_all_util.remove_undocumented(__name__)
|
karllessard/tensorflow
|
tensorflow/python/debug/__init__.py
|
Python
|
apache-2.0
| 2,950
|
import gc
import os
import weakref

from cStringIO import StringIO

try:
    # signal is unavailable on some platforms; the whole TestBreak class is
    # skipped below when it is None.
    import signal
except ImportError:
    signal = None

import unittest2
class TestBreak(unittest2.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest2.signals._results = weakref.WeakKeyDictionary()
unittest2.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest2.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest2.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest2.TestResult()
unittest2.registerResult(result)
for ref in unittest2.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest2.TestResult()
unittest2.installHandler()
unittest2.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest2.TestResult()
unittest2.installHandler()
unittest2.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest2.installHandler()
result = unittest2.TestResult()
unittest2.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest2.TestResult()
unittest2.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest2.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest2.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest2.TextTestRunner(stream=StringIO())
result = runner.run(unittest2.TestSuite())
self.assertIn(result, unittest2.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest2.TestResult()
unittest2.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest2.TestResult()
unittest2.registerResult(result)
unittest2.installHandler()
self.assertTrue(unittest2.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest2.removeResult(unittest2.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
    def testMainInstallsHandler(self):
        """TestProgram.runTests installs the SIGINT handler only when
        catchbreak is true, and passes verbosity/failfast/buffer through to
        the runner."""
        failfast = object()
        test = object()
        verbosity = object()
        result = object()
        default_handler = signal.getsignal(signal.SIGINT)

        class FakeRunner(object):
            # Class-level lists record every construction and run() call so
            # the assertions below can inspect them.
            initArgs = []
            runArgs = []
            def __init__(self, *args, **kwargs):
                self.initArgs.append((args, kwargs))
            def run(self, test):
                self.runArgs.append(test)
                return result

        class Program(unittest2.TestProgram):
            # Bypass TestProgram.__init__ entirely; set only the attributes
            # that runTests() reads.
            def __init__(self, catchbreak):
                self.exit = False
                self.verbosity = verbosity
                self.failfast = failfast
                self.catchbreak = catchbreak
                self.testRunner = FakeRunner
                self.test = test
                self.result = None

        # catchbreak=False: the SIGINT handler must be left untouched.
        p = Program(False)
        p.runTests()
        self.assertEqual(FakeRunner.initArgs, [((), {'verbosity': verbosity,
                                                     'failfast': failfast,
                                                     'buffer': None})])
        self.assertEqual(FakeRunner.runArgs, [test])
        self.assertEqual(p.result, result)
        self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)

        # catchbreak=True: runTests must install the interrupt handler.
        FakeRunner.initArgs = []
        FakeRunner.runArgs = []
        p = Program(True)
        p.runTests()
        self.assertEqual(FakeRunner.initArgs, [((), {'verbosity': verbosity,
                                                     'failfast': failfast,
                                                     'buffer': None})])
        self.assertEqual(FakeRunner.runArgs, [test])
        self.assertEqual(p.result, result)
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest2.installHandler()
unittest2.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest2.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
    def testRemoveHandlerAsDecorator(self):
        """Used as a decorator, removeHandler uninstalls the break handler
        for the duration of the call and reinstalls it afterwards."""
        default_handler = signal.getsignal(signal.SIGINT)
        unittest2.installHandler()
        def test():
            # Inside the decorated call the default handler is active again.
            self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
        test = unittest2.removeHandler(test)
        test()
        # After the call returns, the break handler is back in place.
        self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
# Should also skip some tests on Jython
# Skip the whole TestBreak class unless os.kill and the signal module are
# usable on this platform (they are missing or limited e.g. on Windows).
skipper = unittest2.skipUnless(hasattr(os, 'kill') and signal is not None,
                               "test uses os.kill(...) and the signal module")
TestBreak = skipper(TestBreak)

if __name__ == '__main__':
    unittest2.main()
|
supercheetah/diceroller
|
pyinstaller/PyInstaller/lib/unittest2/test/test_break.py
|
Python
|
artistic-2.0
| 8,469
|
#!/usr/bin/env python3
#
# setup.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup;
# Package metadata for stagger.  Fixes the inconsistent keyword-argument
# spacing (PEP 8: no spaces around '=' in keyword arguments).
setup(
    name="stagger",
    version="0.4.2",
    url="http://code.google.com/p/stagger",
    author="Karoly Lorentey",
    author_email="karoly@lorentey.hu",
    packages=["stagger"],
    # Console entry point: the `stagger` command runs stagger.commandline:main.
    entry_points={
        'console_scripts': ['stagger = stagger.commandline:main']
    },
    test_suite="test.alltests.suite",
    license="BSD",
    description="ID3v1/ID3v2 tag manipulation package in pure Python 3",
    long_description="""
The ID3v2 tag format is notorious for its useless specification
documents and its quirky, mutually incompatible
part-implementations. Stagger is to provide a robust tagging package
that is able to handle all the various badly formatted tags out there
and allow you to convert them to a consensus format.
""",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Topic :: Multimedia :: Sound/Audio"
    ],
)
|
SpaWnAge/stagger
|
setup.py
|
Python
|
bsd-2-clause
| 2,585
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: this module is in "preview" status and
# maintained by the network team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
# Module documentation rendered by ansible-doc.  Fixes the typos
# "compatbility" -> "compatibility" and "reqires" -> "requires".
DOCUMENTATION = """
---
module: junos_facts
version_added: "2.1"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running Juniper Junos
description:
  - Collects fact information from a remote device running the Junos
    operating system.  By default, the module will collect basic fact
    information from the device to be included with the hostvars.
    Additional fact information can be collected based on the
    configured set of arguments.
extends_documentation_fragment: junos
options:
  gather_subset:
    description:
      - When supplied, this argument will restrict the facts collected
        to a given subset.  Possible values for this argument include
        all, hardware, config, and interfaces.  Can specify a list of
        values to include a larger subset.  Values can also be used
        with an initial C(M(!)) to specify that a specific subset should
        not be collected.  To maintain backward compatibility old style facts
        can be retrieved using all value, this requires junos-eznc to be installed
        as a prerequisite.
    required: false
    default: "!config"
    version_added: "2.3"
  config_format:
    description:
      - The I(config_format) argument specifies the format of the configuration
        when serializing output from the device.  This argument is applicable
        only when C(config) value is present in I(gather_subset).
        The I(config_format) should be supported by the junos version running on
        device.
    required: false
    default: text
    choices: ['xml', 'set', 'text', 'json']
    version_added: "2.3"
requirements:
  - ncclient (>=v0.5.2)
notes:
  - Ensure I(config_format) used to retrieve configuration from device
    is supported by junos version running on device.
  - This module requires the netconf system service be enabled on
    the remote device being managed.
  - Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
"""
# Usage examples rendered by ansible-doc; restored to valid YAML indentation.
EXAMPLES = """
- name: collect default set of facts
  junos_facts:

- name: collect default set of facts and configuration
  junos_facts:
    gather_subset: config
"""
# Return-value documentation; fixes the grammar typo "facts collect from".
RETURN = """
ansible_facts:
  description: Returns the facts collected from the device
  returned: always
  type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.junos import junos_argument_spec, check_args, get_param
from ansible.module_utils.junos import get_configuration
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.netconf import send_request
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement, tostring
except ImportError:
from xml.etree.ElementTree import Element, SubElement, tostring
try:
from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
HAS_PYEZ = True
except ImportError:
HAS_PYEZ = False
USE_PERSISTENT_CONNECTION = True
class FactsBase(object):
    """Base class for fact collectors: holds the module and a facts dict."""

    def __init__(self, module):
        self.module = module
        # Populated by subclasses' populate() implementations.
        self.facts = dict()

    def populate(self):
        # Subclasses must override this to fill in self.facts.
        raise NotImplementedError

    def cli(self, command):
        # NOTE(review): this calls the `command` argument as a function,
        # which looks wrong if `command` is a command string; no caller of
        # this helper is visible in this file — confirm before relying on it.
        reply = command(self.module, command)
        output = reply.find('.//output')
        # NOTE(review): `if not output:` is also true for a found element
        # with no children (lxml falsiness); `output is None` may be intended.
        if not output:
            self.module.fail_json(msg='failed to retrieve facts for command %s' % command)
        return str(output.text).strip()

    def rpc(self, rpc):
        # Send a bare RPC element named `rpc` over the netconf session.
        return send_request(self.module, Element(rpc))

    def get_text(self, ele, tag):
        # Return the stripped text of ele/tag, or None when the tag is absent
        # (AttributeError from .find() returning None is swallowed).
        try:
            return str(ele.find(tag).text).strip()
        except AttributeError:
            pass
class Default(FactsBase):
    """Collect the basic device facts: hostname, version, model and serial."""

    def populate(self):
        # Software information yields hostname/version/model.
        software = self.rpc('get-software-information').find(
            './/software-information')
        for fact, tag in (('hostname', 'host-name'),
                          ('version', 'junos-version'),
                          ('model', 'product-model')):
            self.facts[fact] = self.get_text(software, tag)
        # The chassis inventory carries the serial number.
        chassis = self.rpc('get-chassis-inventory').find(
            './/chassis-inventory/chassis')
        self.facts['serialnum'] = self.get_text(chassis, 'serial-number')
class Config(FactsBase):
    """Collect the device configuration in the requested serialization."""

    def populate(self):
        fmt = self.module.params['config_format']
        reply = get_configuration(self.module, format=fmt)
        # Each supported format is extracted from a different reply node;
        # the argument spec restricts fmt to exactly these four values.
        if fmt == 'json':
            config = str(reply.text).strip()
        elif fmt == 'xml':
            config = tostring(reply.find('configuration')).strip()
        elif fmt == 'set':
            config = self.get_text(reply, 'configuration-set')
        elif fmt == 'text':
            config = self.get_text(reply, 'configuration-text')
        self.facts['config'] = config
class Hardware(FactsBase):
    """Collect memory, filesystem, routing-engine and module inventory facts."""

    def populate(self):
        reply = self.rpc('get-system-memory-information')
        data = reply.find('.//system-memory-information/system-memory-summary-information')
        # NOTE(review): int() on get_text output assumes both memory tags are
        # always present; a missing tag would raise on int(None).
        self.facts.update({
            'memfree_mb': int(self.get_text(data, 'system-memory-free')),
            'memtotal_mb': int(self.get_text(data, 'system-memory-total'))
        })
        reply = self.rpc('get-system-storage')
        data = reply.find('.//system-storage-information')
        filesystems = list()
        for obj in data:
            filesystems.append(self.get_text(obj, 'filesystem-name'))
        self.facts['filesystems'] = filesystems
        reply = self.rpc('get-route-engine-information')
        data = reply.find('.//route-engine-information')
        routing_engines = dict()
        for obj in data:
            slot = self.get_text(obj, 'slot')
            routing_engines.update({slot: {}})
            routing_engines[slot].update({'slot': slot})
            # Copy every non-blank child element, normalising tag names to
            # underscore style so they are usable as fact keys.
            for child in obj:
                if child.text != "\n":
                    routing_engines[slot].update({child.tag.replace("-", "_"): child.text})
        self.facts['routing_engines'] = routing_engines
        # More than one route-engine entry means a dual-RE chassis.
        if len(data) > 1:
            self.facts['has_2RE'] = True
        else:
            self.facts['has_2RE'] = False
        reply = self.rpc('get-chassis-inventory')
        data = reply.findall('.//chassis-module')
        modules = list()
        for obj in data:
            mod = dict()
            for child in obj:
                if child.text != "\n":
                    mod.update({child.tag.replace("-", "_"): child.text})
            modules.append(mod)
        self.facts['modules'] = modules
class Interfaces(FactsBase):
    """Collect per-interface status, speed, MAC address, MTU and type."""

    def populate(self):
        # Request detailed interface information in one RPC.
        request = Element('get-interface-information')
        SubElement(request, 'detail')
        reply = send_request(self.module, request)

        interface_facts = {}
        same_name_fields = ('oper-status', 'admin-status', 'speed', 'mtu')
        for item in reply[0]:
            # Most fact keys reuse the XML tag name verbatim.
            entry = {field: self.get_text(item, field)
                     for field in same_name_fields}
            entry['macaddress'] = self.get_text(item, 'hardware-physical-address')
            entry['type'] = self.get_text(item, 'if-type')
            interface_facts[self.get_text(item, 'name')] = entry

        self.facts['interfaces'] = interface_facts
class Facts(FactsBase):
    """Collect old-style facts via junos-eznc (PyEZ), when it is installed."""

    def _connect(self, module):
        """Open a PyEZ Device connection built from the module parameters.

        Calls module.fail_json() when the netconf connection cannot be
        established; otherwise returns the open Device.
        """
        host = get_param(module, 'host')

        kwargs = {
            'port': get_param(module, 'port') or 830,
            'user': get_param(module, 'username')
        }

        if get_param(module, 'password'):
            kwargs['passwd'] = get_param(module, 'password')

        if get_param(module, 'ssh_keyfile'):
            kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')

        # Facts are gathered explicitly in populate(); avoid doing it twice.
        kwargs['gather_facts'] = False

        try:
            device = Device(host, **kwargs)
            device.open()
            device.timeout = get_param(module, 'timeout') or 10
        except ConnectError:
            exc = get_exception()
            # Bug fix: AnsibleModule.fail_json() accepts keyword arguments
            # only; the original positional call would itself raise TypeError.
            module.fail_json(msg='unable to connect to %s: %s' % (host, str(exc)))

        return device

    def populate(self):
        device = self._connect(self.module)
        facts = dict(device.facts)

        # PyEZ exposes a '2RE' key; rename it so it becomes a valid
        # identifier once prefixed as an ansible_net_* variable.
        if '2RE' in facts:
            facts['has_2RE'] = facts['2RE']
            del facts['2RE']

        # Convert PyEZ's lazy mapping types into plain dicts for JSON output.
        facts['version_info'] = dict(facts['version_info'])

        if 'junos_info' in facts:
            for key, value in facts['junos_info'].items():
                if 'object' in value:
                    value['object'] = dict(value['object'])

        return facts
# Map each gather_subset keyword to the collector class implementing it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    config=Config,
    interfaces=Interfaces
)

# The set of subset names accepted by the gather_subset option.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """ Main entry point for AnsibleModule
    """
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list'),
        config_format=dict(default='text', choices=['xml', 'text', 'set', 'json']),
    )
    argument_spec.update(junos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    gather_subset = module.params['gather_subset']

    # ofacts: whether old-style (PyEZ) facts should also be gathered; only
    # the literal 'all' / '!all' values toggle it.
    ofacts = False

    runable_subsets = set()
    exclude_subsets = set()

    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            ofacts = True
            continue

        # A leading '!' marks the subset (or 'all') for exclusion.
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                ofacts = False
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Subset must be one of [%s], got %s' %
                             (', '.join(VALID_SUBSETS), subset))

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    # No explicit includes means "everything", minus the exclusions; the
    # 'default' subset is always gathered, even when excluded above.
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    # Instantiate one collector per selected subset, then merge their facts.
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module))

    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    # Prefix every fact key for the ansible_facts namespace.
    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    if ofacts:
        if HAS_PYEZ:
            ansible_facts.update(Facts(module).populate())
        else:
            # Old-style facts were requested but junos-eznc is missing:
            # degrade to a warning rather than failing the whole module.
            warnings += ['junos-eznc is required to gather old style facts but does not appear to be installed. '
                         'It can be installed using `pip install junos-eznc`']

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)


if __name__ == '__main__':
    main()
|
marratj/ansible
|
lib/ansible/modules/network/junos/junos_facts.py
|
Python
|
gpl-3.0
| 11,501
|
#!/usr/bin/env python
# WS client example with HTTP Basic Authentication
import asyncio
import websockets
async def hello():
    """Connect with userinfo-style credentials and print the greeting.

    Per the file header this demonstrates HTTP Basic Authentication; the
    user:password pair is embedded in the URI authority component.
    """
    uri = "ws://mary:p@ssw0rd@localhost:8765"
    async with websockets.connect(uri) as websocket:
        greeting = await websocket.recv()
        print(greeting)

# asyncio.run() (Python 3.7+) replaces the deprecated
# get_event_loop().run_until_complete() pattern for one-shot scripts.
asyncio.run(hello())
|
CYBAI/servo
|
tests/wpt/web-platform-tests/tools/third_party/websockets/example/basic_auth_client.py
|
Python
|
mpl-2.0
| 347
|
# coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
bool_or_none,
int_or_none,
try_get,
unified_timestamp,
)
class RutubeBaseIE(InfoExtractor):
    def _extract_video(self, video, video_id=None, require_title=True):
        """Map a Rutube API video object onto a youtube-dl info dict."""
        # A missing title is fatal only when the caller demands one.
        if require_title:
            title = video['title']
        else:
            title = video.get('title')

        # The API exposes a boolean adult flag; translate it to an age limit.
        is_adult = video.get('is_adult')
        age_limit = None
        if is_adult is not None:
            age_limit = 18 if is_adult is True else 0

        uploader_id = try_get(video, lambda x: x['author']['id'])
        category = try_get(video, lambda x: x['category']['name'])

        return {
            'id': video.get('id') or video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('thumbnail_url'),
            'duration': int_or_none(video.get('duration')),
            'uploader': try_get(video, lambda x: x['author']['name']),
            'uploader_id': compat_str(uploader_id) if uploader_id else None,
            'timestamp': unified_timestamp(video.get('created_ts')),
            'category': [category] if category else None,
            'age_limit': age_limit,
            'view_count': int_or_none(video.get('hits')),
            'comment_count': int_or_none(video.get('comments_count')),
            'is_live': bool_or_none(video.get('is_livestream')),
        }
class RutubeIE(RutubeBaseIE):
    """Extractor for individual Rutube videos (32-hex-digit ids)."""
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})'

    _TESTS = [{
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'md5': '79938ade01294ef7e27574890d0d3769',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'flv',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 80,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'timestamp': 1381943602,
            'upload_date': '20131016',
            'age_limit': 0,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252',
        'only_matching': True,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        # URLs that carry playlist query parameters are handled by
        # RutubePlaylistIE; defer to it to avoid double matching.
        return False if RutubePlaylistIE.suitable(url) else super(RutubeIE, cls).suitable(url)

    @staticmethod
    def _extract_urls(webpage):
        # Find Rutube iframes embedded in third-party pages.
        return [mobj.group('url') for mobj in re.finditer(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Metadata and playback options come from two separate API calls.
        video = self._download_json(
            'http://rutube.ru/api/video/%s/?format=json' % video_id,
            video_id, 'Downloading video JSON')

        info = self._extract_video(video, video_id)

        options = self._download_json(
            'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
            video_id, 'Downloading options JSON')

        # video_balancer maps format ids to stream URLs of varying kinds.
        formats = []
        for format_id, format_url in options['video_balancer'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        self._sort_formats(formats)

        info['formats'] = formats
        return info
class RutubeEmbedIE(InfoExtractor):
    """Extractor for numeric-id Rutube embed pages; resolves the page's
    canonical URL and hands it off to RutubeIE."""
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'flv',
            'timestamp': 1387830582,
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        embed_id = self._match_id(url)
        webpage = self._download_webpage(url, embed_id)

        # The embed page links its canonical (32-hex-digit) video URL.
        canonical_url = self._html_search_regex(
            r'<link\s+rel="canonical"\s+href="([^"]+?)"', webpage,
            'Canonical URL')
        return self.url_result(canonical_url, RutubeIE.ie_key())
class RutubePlaylistBaseIE(RutubeBaseIE):
    """Shared pagination logic for Rutube playlist-style extractors."""

    def _next_page_url(self, page_num, playlist_id, *args, **kwargs):
        # Subclasses supply _PAGE_TEMPLATE with (playlist_id, page) slots.
        return self._PAGE_TEMPLATE % (playlist_id, page_num)

    def _entries(self, playlist_id, *args, **kwargs):
        next_page_url = None
        for pagenum in itertools.count(1):
            page = self._download_json(
                next_page_url or self._next_page_url(
                    pagenum, playlist_id, *args, **kwargs),
                playlist_id, 'Downloading page %s' % pagenum)

            results = page.get('results')
            if not results or not isinstance(results, list):
                break

            for result in results:
                video_url = result.get('video_url')
                # Skip malformed entries without a usable video URL.
                if not video_url or not isinstance(video_url, compat_str):
                    continue
                entry = self._extract_video(result, require_title=False)
                entry.update({
                    '_type': 'url',
                    'url': video_url,
                    'ie_key': RutubeIE.ie_key(),
                })
                yield entry

            # Prefer the API-provided next link; stop when pagination ends.
            next_page_url = page.get('next')
            if not next_page_url or not page.get('has_next'):
                break

    def _extract_playlist(self, playlist_id, *args, **kwargs):
        return self.playlist_result(
            self._entries(playlist_id, *args, **kwargs),
            playlist_id, kwargs.get('playlist_name'))

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
class RutubeChannelIE(RutubePlaylistBaseIE):
    """Extractor for Rutube tag pages (listed as channels)."""
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channels'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]

    # Paginated API endpoint; slots are (tag id, page number).
    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'
class RutubeMovieIE(RutubePlaylistBaseIE):
    """Extractor for Rutube TV/movie metainfo pages."""
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _TESTS = []

    # Movie metadata endpoint (for the playlist name) and its paginated
    # video listing; slots are (movie id) and (movie id, page number).
    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        movie_id = self._match_id(url)
        # Fetch the movie object first so the playlist carries its name.
        movie = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        return self._extract_playlist(
            movie_id, playlist_name=movie.get('name'))
class RutubePersonIE(RutubePlaylistBaseIE):
    """Extractor for a Rutube user's video listing."""
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
    }]

    # Paginated API endpoint; slots are (person id, page number).
    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
class RutubePlaylistIE(RutubePlaylistBaseIE):
    """Extractor for playlists referenced via pl_id/pl_type query args on
    video/embed URLs."""
    IE_NAME = 'rutube:playlist'
    IE_DESC = 'Rutube playlists'
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?.*?\bpl_id=(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://rutube.ru/video/cecd58ed7d531fc0f3d795d51cee9026/?pl_id=3097&pl_type=tag',
        'info_dict': {
            'id': '3097',
        },
        'playlist_count': 27,
    }, {
        'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source',
        'only_matching': True,
    }]

    # Paginated API endpoint; slots are (playlist kind, playlist id, page).
    _PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/%s/%s/?page=%s&format=json'

    @classmethod
    def suitable(cls, url):
        # Only claim URLs that carry both pl_type and a numeric pl_id;
        # bare video/embed URLs fall through to RutubeIE instead.
        if not super(RutubePlaylistIE, cls).suitable(url):
            return False
        params = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        return params.get('pl_type', [None])[0] and int_or_none(params.get('pl_id', [None])[0])

    def _next_page_url(self, page_num, playlist_id, item_kind):
        return self._PAGE_TEMPLATE % (item_kind, playlist_id, page_num)

    def _real_extract(self, url):
        qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
        playlist_kind = qs['pl_type'][0]
        playlist_id = qs['pl_id'][0]
        return self._extract_playlist(playlist_id, item_kind=playlist_kind)
|
oskar456/youtube-dl
|
youtube_dl/extractor/rutube.py
|
Python
|
unlicense
| 10,151
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.sasl_mechanisms}.
"""
from __future__ import absolute_import, division
from twisted.trial import unittest
from twisted.python.compat import networkString
from twisted.words.protocols.jabber import sasl_mechanisms
class PlainTests(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.Plain}.
    """
    def test_getInitialResponse(self):
        """
        The initial response joins authzid, authcid and password with NUL
        separators.
        """
        mechanism = sasl_mechanisms.Plain(None, u'test', u'secret')
        initial = mechanism.getInitialResponse()
        self.assertEqual(initial, b'\x00test\x00secret')
class AnonymousTests(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.Anonymous}.
    """
    def test_getInitialResponse(self):
        """
        The ANONYMOUS mechanism produces no initial response.
        """
        mechanism = sasl_mechanisms.Anonymous()
        self.assertEqual(mechanism.getInitialResponse(), None)
class DigestMD5Tests(unittest.TestCase):
    """
    Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.DigestMD5}.
    """
    def setUp(self):
        self.mechanism = sasl_mechanisms.DigestMD5(
            u'xmpp', u'example.org', None, u'test', u'secret')

    def test_getInitialResponse(self):
        """
        Test that no initial response is generated.
        """
        self.assertIdentical(self.mechanism.getInitialResponse(), None)

    def test_getResponse(self):
        """
        The response to a Digest-MD5 challenge includes the parameters from the
        challenge.
        """
        challenge = (
            b'realm="localhost",nonce="1234",qop="auth",charset=utf-8,'
            b'algorithm=md5-sess')
        directives = self.mechanism._parse(
            self.mechanism.getResponse(challenge))
        # cnonce and response vary per call; drop them before comparing the
        # remaining directives.
        del directives[b"cnonce"], directives[b"response"]
        self.assertEqual({
            b'username': b'test', b'nonce': b'1234', b'nc': b'00000001',
            b'qop': [b'auth'], b'charset': b'utf-8',
            b'realm': b'localhost', b'digest-uri': b'xmpp/example.org'
        }, directives)

    def test_getResponseNonAsciiRealm(self):
        """
        Bytes outside the ASCII range in the challenge are nevertheless
        included in the response.
        """
        challenge = (b'realm="\xc3\xa9chec.example.org",nonce="1234",'
                     b'qop="auth",charset=utf-8,algorithm=md5-sess')
        directives = self.mechanism._parse(
            self.mechanism.getResponse(challenge))
        del directives[b"cnonce"], directives[b"response"]
        self.assertEqual({
            b'username': b'test', b'nonce': b'1234', b'nc': b'00000001',
            b'qop': [b'auth'], b'charset': b'utf-8',
            b'realm': b'\xc3\xa9chec.example.org',
            b'digest-uri': b'xmpp/example.org'}, directives)

    def test_getResponseNoRealm(self):
        """
        The response to a challenge without a realm uses the host part of the
        JID as the realm.
        """
        challenge = b'nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess'
        directives = self.mechanism._parse(
            self.mechanism.getResponse(challenge))
        self.assertEqual(directives[b'realm'], b'example.org')

    def test_getResponseNoRealmIDN(self):
        """
        If the challenge does not include a realm and the host part of the JID
        includes bytes outside of the ASCII range, the response still includes
        the host part of the JID as the realm.
        """
        # Rebuild the mechanism with a non-ASCII host.
        self.mechanism = sasl_mechanisms.DigestMD5(
            u'xmpp', u'\u00e9chec.example.org', None, u'test', u'secret')
        challenge = b'nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess'
        directives = self.mechanism._parse(
            self.mechanism.getResponse(challenge))
        self.assertEqual(directives[b'realm'], b'\xc3\xa9chec.example.org')

    def test_getResponseRspauth(self):
        """
        If the challenge just has a rspauth directive, the response is empty.
        """
        challenge = \
            b'rspauth=cnNwYXV0aD1lYTQwZjYwMzM1YzQyN2I1NTI3Yjg0ZGJhYmNkZmZmZA=='
        response = self.mechanism.getResponse(challenge)
        self.assertEqual(b"", response)

    def test_calculateResponse(self):
        """
        The response to a Digest-MD5 challenge is computed according to RFC
        2831.
        """
        charset = 'utf-8'
        nonce = b'OA6MG9tEQGm2hh'
        nc = networkString('%08x' % (1,))
        cnonce = b'OA6MHXh6VqTrRk'

        username = u'\u0418chris'
        password = u'\u0418secret'
        host = u'\u0418elwood.innosoft.com'
        digestURI = u'imap/\u0418elwood.innosoft.com'.encode(charset)

        mechanism = sasl_mechanisms.DigestMD5(
            b'imap', host, None, username, password)
        response = mechanism._calculateResponse(
            cnonce, nc, nonce, username.encode(charset),
            password.encode(charset), host.encode(charset), digestURI)
        # Expected digest for these fixed inputs.
        self.assertEqual(response, b'7928f233258be88392424d094453c5e3')

    def test_parse(self):
        """
        A challenge can be parsed into a L{dict} with L{bytes} or L{list}
        values.
        """
        challenge = (
            b'nonce="1234",qop="auth,auth-conf",charset=utf-8,'
            b'algorithm=md5-sess,cipher="des,3des"')
        directives = self.mechanism._parse(challenge)
        self.assertEqual({
            b"algorithm": b"md5-sess", b"nonce": b"1234",
            b"charset": b"utf-8", b"qop": [b'auth', b'auth-conf'],
            b"cipher": [b'des', b'3des']
        }, directives)
|
EricMuller/mynotes-backend
|
requirements/twisted/Twisted-17.1.0/src/twisted/words/test/test_jabbersaslmechanisms.py
|
Python
|
mit
| 5,758
|
from __future__ import unicode_literals
from jinja2 import Template
from six.moves.urllib.parse import parse_qs, urlparse
from .models import route53_backend
import xmltodict
def list_or_create_hostzone_response(request, full_url, headers):
    """POST creates a hosted zone; GET lists all hosted zones."""
    if request.method == "GET":
        all_zones = route53_backend.get_all_hosted_zones()
        body = Template(LIST_HOSTED_ZONES_RESPONSE).render(zones=all_zones)
        return 200, headers, body
    elif request.method == "POST":
        elements = xmltodict.parse(request.body)
        zone_request = elements["CreateHostedZoneRequest"]
        comment = zone_request["HostedZoneConfig"]["Comment"]
        new_zone = route53_backend.create_hosted_zone(
            zone_request["Name"], comment=comment)
        body = Template(CREATE_HOSTED_ZONE_RESPONSE).render(zone=new_zone)
        return 201, headers, body
def get_or_delete_hostzone_response(request, full_url, headers):
    """GET describes a single hosted zone; DELETE removes it."""
    parsed_url = urlparse(full_url)
    # The zone id is the last path component.
    zoneid = parsed_url.path.rstrip('/').rsplit('/', 1)[1]
    the_zone = route53_backend.get_hosted_zone(zoneid)
    if not the_zone:
        return 404, headers, "Zone %s not Found" % zoneid
    if request.method == "DELETE":
        route53_backend.delete_hosted_zone(zoneid)
        return 200, headers, DELETE_HOSTED_ZONE_RESPONSE
    elif request.method == "GET":
        body = Template(GET_HOSTED_ZONE_RESPONSE).render(zone=the_zone)
        return 200, headers, body
def rrset_response(request, full_url, headers):
    """Handle ChangeResourceRecordSets (POST) and ListResourceRecordSets
    (GET) for a hosted zone."""
    parsed_url = urlparse(full_url)
    method = request.method

    # Path looks like .../hostedzone/<zoneid>/rrset; take the second-to-last
    # component.
    zoneid = parsed_url.path.rstrip('/').rsplit('/', 2)[1]
    the_zone = route53_backend.get_hosted_zone(zoneid)
    if not the_zone:
        return 404, headers, "Zone %s Not Found" % zoneid

    if method == "POST":
        elements = xmltodict.parse(request.body)

        # xmltodict collapses a single <Change> element into a dict;
        # normalise to a list so the loop below always works.
        change_list = elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']
        if not isinstance(change_list, list):
            change_list = [elements['ChangeResourceRecordSetsRequest']['ChangeBatch']['Changes']['Change']]

        for value in change_list:
            action = value['Action']
            record_set = value['ResourceRecordSet']

            if action == 'CREATE':
                if 'ResourceRecords' in record_set:
                    resource_records = list(record_set['ResourceRecords'].values())[0]
                    if not isinstance(resource_records, list):
                        # Depending on how many records there are, this may or may not be a list
                        resource_records = [resource_records]
                    record_values = [x['Value'] for x in resource_records]
                elif 'AliasTarget' in record_set:
                    # Alias records store the target DNS name as their value.
                    record_values = [record_set['AliasTarget']['DNSName']]
                record_set['ResourceRecords'] = record_values
                the_zone.add_rrset(record_set)
            elif action == "DELETE":
                # Prefer deletion by SetIdentifier (weighted/latency sets),
                # fall back to the record name.
                if 'SetIdentifier' in record_set:
                    the_zone.delete_rrset_by_id(record_set["SetIdentifier"])
                else:
                    the_zone.delete_rrset_by_name(record_set["Name"])

        return 200, headers, CHANGE_RRSET_RESPONSE

    elif method == "GET":
        querystring = parse_qs(parsed_url.query)
        template = Template(LIST_RRSET_REPONSE)
        # Optional type/name query filters.
        type_filter = querystring.get("type", [None])[0]
        name_filter = querystring.get("name", [None])[0]
        record_sets = the_zone.get_record_sets(type_filter, name_filter)
        return 200, headers, template.render(record_sets=record_sets)
def health_check_response(request, full_url, headers):
    """Create (POST), delete (DELETE) or list (GET) Route53 health checks."""
    parsed_url = urlparse(full_url)
    method = request.method

    if method == "POST":
        properties = xmltodict.parse(request.body)['CreateHealthCheckRequest']['HealthCheckConfig']
        # Only 'Type' is accessed as mandatory; everything else defaults to
        # None when absent from the request.
        health_check_args = {
            "ip_address": properties.get('IPAddress'),
            "port": properties.get('Port'),
            "type": properties['Type'],
            "resource_path": properties.get('ResourcePath'),
            "fqdn": properties.get('FullyQualifiedDomainName'),
            "search_string": properties.get('SearchString'),
            "request_interval": properties.get('RequestInterval'),
            "failure_threshold": properties.get('FailureThreshold'),
        }
        health_check = route53_backend.create_health_check(health_check_args)
        template = Template(CREATE_HEALTH_CHECK_RESPONSE)
        return 201, headers, template.render(health_check=health_check)
    elif method == "DELETE":
        # The health check id is the last path component.
        health_check_id = parsed_url.path.split("/")[-1]
        route53_backend.delete_health_check(health_check_id)
        return 200, headers, DELETE_HEALTH_CHECK_REPONSE
    elif method == "GET":
        template = Template(LIST_HEALTH_CHECKS_REPONSE)
        health_checks = route53_backend.get_health_checks()
        return 200, headers, template.render(health_checks=health_checks)
# Jinja2/XML response templates mimicking the AWS Route53 wire format.
# NOTE(review): the "REPONSE" misspelling in several names is preserved
# deliberately — these constants are referenced by the handlers above.
LIST_RRSET_REPONSE = """<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ResourceRecordSets>
{% for record_set in record_sets %}
{{ record_set.to_xml() }}
{% endfor %}
</ResourceRecordSets>
</ListResourceRecordSetsResponse>"""

# Returned for every change batch; status/id/timestamp are fixed dummies.
CHANGE_RRSET_RESPONSE = """<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>/change/C2682N5HXP0BZ4</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>"""

DELETE_HOSTED_ZONE_RESPONSE = """<DeleteHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
</ChangeInfo>
</DeleteHostedZoneResponse>"""

# 'moto.test.com' is a fake delegation-set name server.
GET_HOSTED_ZONE_RESPONSE = """<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
<Config>
<Comment>{{ zone.comment }}</Comment>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</GetHostedZoneResponse>"""

CREATE_HOSTED_ZONE_RESPONSE = """<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>0</ResourceRecordSetCount>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</CreateHostedZoneResponse>"""

LIST_HOSTED_ZONES_RESPONSE = """<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
<Comment>{{ zone.comment }}</Comment>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
</ListHostedZonesResponse>"""

# Health-check templates use the newer 2013-04-01 API namespace.
CREATE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{{ health_check.to_xml() }}
</CreateHealthCheckResponse>"""

LIST_HEALTH_CHECKS_REPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListHealthChecksResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<HealthChecks>
{% for health_check in health_checks %}
{{ health_check.to_xml() }}
{% endfor %}
</HealthChecks>
<IsTruncated>false</IsTruncated>
<MaxItems>{{ health_checks|length }}</MaxItems>
</ListHealthChecksResponse>"""

DELETE_HEALTH_CHECK_REPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
</DeleteHealthCheckResponse>"""
|
rouge8/moto
|
moto/route53/responses.py
|
Python
|
apache-2.0
| 8,044
|
# ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import struct
import pkgutil
from twitter.common.java.perfdata import PerfData
import mock
import pytest
# Package-relative path of a captured hsperfdata blob used as a test fixture.
_EXAMPLE_RESOURCE = 'resources/example_hsperfdata'
def test_perfdata_integration():
    """Sanity-check PerfData against a real captured hsperfdata resource."""
    def provider():
        return pkgutil.get_data('twitter.common.java', _EXAMPLE_RESOURCE)

    perfdata = PerfData.get(provider)
    assert perfdata is not None

    perfdata.sample()
    assert len(perfdata) > 0

    # Iteration, membership and item access must agree with each other.
    sampled_keys = set(perfdata)
    for sampled_key in perfdata:
        assert sampled_key in sampled_keys
        assert perfdata[sampled_key] is not None
def test_struct_unpack_error():
    """A struct.error raised while sampling must surface as PerfData.ParseError."""
    perfdata = PerfData.get(
        lambda: pkgutil.get_data('twitter.common.java', _EXAMPLE_RESOURCE))
    assert perfdata is not None

    with mock.patch('struct.unpack') as unpack_mock:
        unpack_mock.side_effect = struct.error('My shit got corrupted!')
        with pytest.raises(perfdata.ParseError):
            perfdata.sample()
def test_empty_ish_perfdata():
    """PerfData.get must reject providers that return no data or only the magic header.

    The return value of ``PerfData.get`` is intentionally not bound here:
    each call is expected to raise before producing a value, so the original
    ``perfdata = ...`` assignments were dead stores and have been removed.
    """
    # Empty payload: invalid.
    with pytest.raises(ValueError):
        PerfData.get(lambda: '')
    # A bare magic header with no body is also invalid.
    with pytest.raises(ValueError):
        PerfData.get(lambda: PerfData.MAGIC)
|
abel-von/commons
|
tests/python/twitter/common/java/test_perfdata.py
|
Python
|
apache-2.0
| 2,009
|
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add dns_name to Port
Revision ID: 34af2b5c5a59
Revises: 9859ac9c136
Create Date: 2015-08-23 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '34af2b5c5a59'        # this migration
down_revision = '9859ac9c136'    # migration this one applies on top of
from alembic import op
import sqlalchemy as sa
from neutron.extensions import dns
def upgrade():
    """Add the nullable ``dns_name`` column to the ``ports`` table."""
    dns_name_column = sa.Column(
        'dns_name',
        sa.String(length=dns.FQDN_MAX_LEN),
        nullable=True,
    )
    op.add_column('ports', dns_name_column)
|
barnsnake351/neutron
|
neutron/db/migration/alembic_migrations/versions/liberty/expand/34af2b5c5a59_add_dns_name_to_port.py
|
Python
|
apache-2.0
| 1,095
|
import copy
import datetime
import re
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor with Oracle-specific DDL templates and workarounds."""

    # Oracle-flavoured DDL overriding the portable defaults.
    sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"
    sql_alter_column_null = "MODIFY %(column)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
    sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
    sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
    sql_create_column_inline_fk = 'CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s'
    sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
    sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"

    def quote_value(self, value):
        """Return *value* as a literal suitable for inlining into Oracle SQL."""
        if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
            return "'%s'" % value
        elif isinstance(value, str):
            # Double single quotes and escape '%' so the result survives
            # later %-interpolation of the statement template.
            return "'%s'" % value.replace("\'", "\'\'").replace('%', '%%')
        elif isinstance(value, (bytes, bytearray, memoryview)):
            return "'%s'" % value.hex()
        elif isinstance(value, bool):
            return "1" if value else "0"
        else:
            return str(value)

    def remove_field(self, model, field):
        # If the column is an identity column, drop the identity before
        # removing the field.
        if self._is_identity_column(model._meta.db_table, field.column):
            self._drop_identity(model._meta.db_table, field.column)
        super().remove_field(model, field)

    def delete_model(self, model):
        # Run superclass action
        super().delete_model(model)
        # Clean up manually created sequence.
        self.execute("""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(1) INTO i FROM USER_SEQUENCES
WHERE SEQUENCE_NAME = '%(sq_name)s';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.connection.ops._get_no_autofield_sequence_name(model._meta.db_table)})

    def alter_field(self, model, old_field, new_field, strict=False):
        """Alter a field, falling back to Oracle-specific workarounds on known ORA errors."""
        try:
            super().alter_field(model, old_field, new_field, strict)
        except DatabaseError as e:
            description = str(e)
            # If we're changing type to an unsupported type we need a
            # SQLite-ish workaround
            if 'ORA-22858' in description or 'ORA-22859' in description:
                self._alter_field_type_workaround(model, old_field, new_field)
            # If an identity column is changing to a non-numeric type, drop the
            # identity first.
            elif 'ORA-30675' in description:
                self._drop_identity(model._meta.db_table, old_field.column)
                self.alter_field(model, old_field, new_field, strict)
            # If a primary key column is changing to an identity column, drop
            # the primary key first.
            elif 'ORA-30673' in description and old_field.primary_key:
                self._delete_primary_key(model, strict=True)
                self._alter_field_type_workaround(model, old_field, new_field)
            else:
                raise

    def _alter_field_type_workaround(self, model, old_field, new_field):
        """
        Oracle refuses to change from some type to other type.
        What we need to do instead is:
        - Add a nullable version of the desired field with a temporary name. If
          the new column is an auto field, then the temporary column can't be
          nullable.
        - Update the table to transfer values from old to new
        - Drop old column
        - Rename the new column and possibly drop the nullable property
        """
        # Make a new field that's like the new one but with a temporary
        # column name.
        new_temp_field = copy.deepcopy(new_field)
        new_temp_field.null = (new_field.get_internal_type() not in ('AutoField', 'BigAutoField', 'SmallAutoField'))
        new_temp_field.column = self._generate_temp_name(new_field.column)
        # Add it
        self.add_field(model, new_temp_field)
        # Explicit data type conversion
        # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf
        # /Data-Type-Comparison-Rules.html#GUID-D0C5A47E-6F93-4C2D-9E49-4F2B86B359DD
        new_value = self.quote_name(old_field.column)
        old_type = old_field.db_type(self.connection)
        if re.match('^N?CLOB', old_type):
            # CLOBs must be cast to VARCHAR2 before any further conversion.
            new_value = "TO_CHAR(%s)" % new_value
            old_type = 'VARCHAR2'
        if re.match('^N?VARCHAR2', old_type):
            new_internal_type = new_field.get_internal_type()
            if new_internal_type == 'DateField':
                new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
            elif new_internal_type == 'DateTimeField':
                new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
            elif new_internal_type == 'TimeField':
                # TimeField are stored as TIMESTAMP with a 1900-01-01 date part.
                new_value = "TO_TIMESTAMP(CONCAT('1900-01-01 ', %s), 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
        # Transfer values across
        self.execute("UPDATE %s set %s=%s" % (
            self.quote_name(model._meta.db_table),
            self.quote_name(new_temp_field.column),
            new_value,
        ))
        # Drop the old field
        self.remove_field(model, old_field)
        # Rename and possibly make the new field NOT NULL
        super().alter_field(model, new_temp_field, new_field)

    def normalize_name(self, name):
        """
        Get the properly shortened and uppercased identifier as returned by
        quote_name() but without the quotes.
        """
        nn = self.quote_name(name)
        if nn[0] == '"' and nn[-1] == '"':
            nn = nn[1:-1]
        return nn

    def _generate_temp_name(self, for_name):
        """Generate temporary names for workarounds that need temp columns."""
        suffix = hex(hash(for_name)).upper()[1:]
        return self.normalize_name(for_name + "_" + suffix)

    def prepare_default(self, value):
        # Defaults are inlined as quoted literals rather than bound parameters.
        return self.quote_value(value)

    def _field_should_be_indexed(self, model, field):
        # Oracle cannot index certain data types (see _limited_data_types).
        create_index = super()._field_should_be_indexed(model, field)
        db_type = field.db_type(self.connection)
        if db_type is not None and db_type.lower() in self.connection._limited_data_types:
            return False
        return create_index

    def _unique_should_be_added(self, old_field, new_field):
        # A field becoming the primary key already gains uniqueness.
        return (
            super()._unique_should_be_added(old_field, new_field) and
            not self._field_became_primary_key(old_field, new_field)
        )

    def _is_identity_column(self, table_name, column_name):
        """Return truthy if the given column is an Oracle identity column."""
        with self.connection.cursor() as cursor:
            cursor.execute("""
SELECT
CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END
FROM user_tab_cols
WHERE table_name = %s AND
column_name = %s
""", [self.normalize_name(table_name), self.normalize_name(column_name)])
            row = cursor.fetchone()
            return row[0] if row else False

    def _drop_identity(self, table_name, column_name):
        """Strip the IDENTITY property from a column, leaving the column itself."""
        self.execute('ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY' % {
            'table': self.quote_name(table_name),
            'column': self.quote_name(column_name),
        })
|
georgemarshall/django
|
django/db/backends/oracle/schema.py
|
Python
|
bsd-3-clause
| 7,809
|
# -*- coding: utf-8 -*-
"""
Mixins for fields.
"""
from bok_choy.promise import EmptyPromise
from common.test.acceptance.tests.helpers import get_selected_option_text, select_option_by_text
class FieldsMixin(object):
    """
    Methods for testing fields in pages.

    Page-object helpers driving the ``.u-field-*`` account-settings widgets
    via CSS queries.  Assumes the host class provides bok-choy's ``q``,
    ``browser``, ``wait_for_*`` helpers.
    """
    def field(self, field_id):
        """
        Return field with field_id.
        """
        query = self.q(css='.u-field-{}'.format(field_id))
        return query.text[0] if query.present else None

    def wait_for_field(self, field_id):
        """
        Wait for a field to appear in DOM.
        """
        EmptyPromise(
            lambda: self.field(field_id) is not None,
            "Field with id \"{0}\" is in DOM.".format(field_id)
        ).fulfill()

    def mode_for_field(self, field_id):
        """
        Extract current field mode.
        Returns:
            `placeholder`/`edit`/`display` (or None if the field is absent
            or carries none of the three mode classes).
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{}'.format(field_id))
        if not query.present:
            return None
        field_classes = query.attrs('class')[0].split()
        if 'mode-placeholder' in field_classes:
            return 'placeholder'
        if 'mode-display' in field_classes:
            return 'display'
        if 'mode-edit' in field_classes:
            return 'edit'

    def icon_for_field(self, field_id, icon_id):
        """
        Check if field icon is present.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-icon'.format(field_id))
        return query.present and icon_id in query.attrs('class')[0].split()

    def title_for_field(self, field_id):
        """
        Return the title of a field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-title'.format(field_id))
        return query.text[0] if query.present else None

    def message_for_field(self, field_id):
        """
        Return the current message in a field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-message'.format(field_id))
        return query.text[0] if query.present else None

    def message_for_textarea_field(self, field_id):
        """
        Return the current message for textarea field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-message-help'.format(field_id))
        return query.text[0] if query.present else None

    def wait_for_message(self, field_id, message):
        """
        Wait for a message to appear in a field.
        """
        EmptyPromise(
            lambda: message in (self.message_for_field(field_id) or ''),
            "Messsage \"{0}\" is visible.".format(message)
        ).fulfill()

    def indicator_for_field(self, field_id):
        """
        Return the name of the current indicator in a field.

        Extracted from the first ``message-*`` CSS class on the field's
        FontAwesome icon (the part after ``message-``).
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-message .fa'.format(field_id))
        return [
            class_name for class_name
            in query.attrs('class')[0].split(' ')
            if class_name.startswith('message')
        ][0].partition('-')[2] if query.present else None

    def wait_for_indicator(self, field_id, indicator):
        """
        Wait for an indicator to appear in a field.
        """
        EmptyPromise(
            lambda: indicator == self.indicator_for_field(field_id),
            "Indicator \"{0}\" is visible.".format(self.indicator_for_field(field_id))
        ).fulfill()

    def make_field_editable(self, field_id):
        """
        Make a field editable.
        """
        query = self.q(css='.u-field-{}'.format(field_id))
        if not query.present:
            return None
        field_classes = query.attrs('class')[0].split()
        if 'mode-placeholder' in field_classes or 'mode-display' in field_classes:
            if field_id == 'bio':
                # The bio field needs a JS click on its wrapper rather than a
                # WebDriver click on the field root.
                bio_field_selector = '.u-field-bio > .wrapper-u-field'
                self.wait_for_element_visibility(bio_field_selector, 'Bio field is visible')
                self.browser.execute_script("$('" + bio_field_selector + "').click();")
            else:
                self.q(css='.u-field-{}'.format(field_id)).first.click()

    def value_for_readonly_field(self, field_id):
        """
        Return the value in a readonly field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} .u-field-value'.format(field_id))
        if not query.present:
            return None
        return query.text[0]

    def value_for_text_field(self, field_id, value=None, press_enter=True):
        """
        Get or set the value of a text field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} input'.format(field_id))
        if not query.present:
            return None
        if value is not None:
            current_value = query.attrs('value')[0]
            query.results[0].send_keys(u'\ue003' * len(current_value))  # Delete existing value.
            query.results[0].send_keys(value)  # Input new value
            if press_enter:
                query.results[0].send_keys(u'\ue007')  # Press Enter
        return query.attrs('value')[0]

    def set_value_for_textarea_field(self, field_id, value):
        """
        Set the value of a textarea field.
        """
        self.wait_for_field(field_id)
        self.make_field_editable(field_id)
        field_selector = '.u-field-{} textarea'.format(field_id)
        self.wait_for_element_presence(field_selector, 'Editable textarea is present.')
        query = self.q(css=field_selector)
        query.fill(value)
        query.results[0].send_keys(u'\ue007')  # Press Enter

    def get_non_editable_mode_value(self, field_id):
        """
        Return value of field in `display` or `placeholder` mode.
        """
        self.wait_for_field(field_id)
        self.wait_for_ajax()
        return self.q(css='.u-field-{} .u-field-value .u-field-value-readonly'.format(field_id)).text[0]

    def value_for_dropdown_field(self, field_id, value=None):
        """
        Get or set the value in a dropdown field.
        """
        self.wait_for_field(field_id)
        self.make_field_editable(field_id)
        query = self.q(css='.u-field-{} select'.format(field_id))
        if not query.present:
            return None
        if value is not None:
            select_option_by_text(query, value)
        if self.mode_for_field(field_id) == 'edit':
            return get_selected_option_text(query)
        else:
            return self.get_non_editable_mode_value(field_id)

    def link_title_for_link_field(self, field_id):
        """
        Return the title of the link in a link field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-link-title-{}'.format(field_id))
        return query.text[0] if query.present else None

    def wait_for_link_title_for_link_field(self, field_id, expected_title):
        """
        Wait until the title of the specified link field equals expected_title.
        """
        return EmptyPromise(
            lambda: self.link_title_for_link_field(field_id) == expected_title,
            "Link field with link title \"{0}\" is visible.".format(expected_title)
        ).fulfill()

    def click_on_link_in_link_field(self, field_id, field_type='a'):
        """
        Click the link in a link field.
        """
        self.wait_for_field(field_id)
        query = self.q(css='.u-field-{} {}'.format(field_id, field_type))
        if query.present:
            query.first.click()

    def error_for_field(self, field_id):
        """
        Returns bool based on the highlighted border for field.
        """
        query = self.q(css='.u-field-{}.error'.format(field_id))
        return True if query.present else False
|
louyihua/edx-platform
|
common/test/acceptance/pages/lms/fields.py
|
Python
|
agpl-3.0
| 7,991
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unpack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class UnpackOpTest(tf.test.TestCase):
    """Functional tests for tf.unpack on CPU and GPU."""

    def testSimple(self):
        """Unpacking along axis 0 must reproduce the rows of the input."""
        np.random.seed(7)
        for use_gpu in False, True:
            with self.test_session(use_gpu=use_gpu):
                for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
                    data = np.random.randn(*shape)
                    # Convert data to a single tensorflow tensor
                    x = tf.constant(data)
                    # Unpack into a list of tensors
                    cs = tf.unpack(x, num=shape[0])
                    self.assertEqual(type(cs), list)
                    self.assertEqual(len(cs), shape[0])
                    cs = [c.eval() for c in cs]
                    self.assertAllEqual(cs, data)

    def testGradients(self):
        """Numerical gradient of each unpacked slice must be small-error."""
        for use_gpu in False, True:
            for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
                data = np.random.randn(*shape)
                # One sub-shape per unpacked output.
                shapes = [shape[1:]] * shape[0]
                for i in xrange(shape[0]):
                    with self.test_session(use_gpu=use_gpu):
                        x = tf.constant(data)
                        cs = tf.unpack(x, num=shape[0])
                        err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
                        self.assertLess(err, 1e-6)

    def testInferNum(self):
        """When num is omitted, it is inferred from the static shape."""
        with self.test_session():
            for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
                x = tf.placeholder(np.float32, shape=shape)
                cs = tf.unpack(x)
                self.assertEqual(type(cs), list)
                self.assertEqual(len(cs), shape[0])

    def testCannotInferNum(self):
        """An unknown-shape placeholder without num must raise ValueError."""
        x = tf.placeholder(np.float32)
        with self.assertRaisesRegexp(
            ValueError, r'Cannot infer num from shape <unknown>'):
            tf.unpack(x)
if __name__ == '__main__':
    # Run under the TensorFlow test harness when invoked directly.
    tf.test.main()
|
ninotoshi/tensorflow
|
tensorflow/python/kernel_tests/unpack_op_test.py
|
Python
|
apache-2.0
| 2,525
|
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestCrossCovariance(unittest.TestCase):
    """Tests functions.cross_covariance against a brute-force reference."""

    def setUp(self):
        # Two random batches sharing batch size 4 but with different widths.
        self.y = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        self.z = numpy.random.uniform(-1, 1, (4, 2)).astype(numpy.float32)

    def check_forward(self, y_data, z_data):
        """Forward pass must match a triple-loop cross-covariance computation."""
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(loss.data)
        # Compute expected value
        y_data, z_data = cuda.to_cpu(y_data), cuda.to_cpu(z_data)
        y_mean = y_data.mean(axis=0)
        z_mean = z_data.mean(axis=0)
        N = y_data.shape[0]
        loss_expect = 0
        for i in six.moves.xrange(y_data.shape[1]):
            for j in six.moves.xrange(z_data.shape[1]):
                ij_loss = 0.
                for n in six.moves.xrange(N):
                    ij_loss += (y_data[n, i] - y_mean[i]) * (
                        z_data[n, j] - z_mean[j])
                ij_loss /= N
                loss_expect += ij_loss ** 2
        loss_expect *= 0.5
        self.assertAlmostEqual(loss_expect, loss_value, places=5)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.y, self.z)

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.y), cuda.to_gpu(self.z))

    def check_backward(self, y_data, z_data):
        """Analytical gradients must match numerical gradients of the forward."""
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z)
        scaled_loss = 0.5 * loss
        scaled_loss.backward()
        func = loss.creator
        f = lambda: func.forward((y.data, z.data))
        gy, gz = gradient_check.numerical_grad(f, (y.data, z.data),
                                               (0.5,), eps=0.02)
        gradient_check.assert_allclose(gy, y.grad)
        gradient_check.assert_allclose(gz, z.grad)

    def check_type(self, y_data, z_data):
        """Gradient dtypes must match the input dtypes."""
        y = chainer.Variable(y_data)
        z = chainer.Variable(z_data)
        loss = functions.cross_covariance(y, z)
        loss.backward()
        self.assertEqual(y_data.dtype, y.grad.dtype)
        self.assertEqual(z_data.dtype, z.grad.dtype)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.y, self.z)

    def test_backward_type_cpu(self):
        self.check_type(self.y, self.z)

    @attr.gpu
    def test_backward_type_gpu(self):
        self.check_type(cuda.to_gpu(self.y), cuda.to_gpu(self.z))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.y), cuda.to_gpu(self.z))
# Register this module's tests with Chainer's test runner.
testing.run_module(__name__, __file__)
|
masia02/chainer
|
tests/chainer_tests/functions_tests/test_cross_covariance.py
|
Python
|
mit
| 3,030
|
#!/usr/bin/env python
#
# Copyright 2008,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gnuradio import gr, gr_unittest
import fft_swig as fft
import sys
import random
# First 64 primes; consumed pairwise below as (re, im) of complex test samples.
primes = (2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,
          59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,
          137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,
          227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311)
class test_fft(gr_unittest.TestCase):
    """QA tests for the fft.fft_vcc block (forward and inverse transforms).

    The original file repeated the same 32-value complex reference tuple in
    all three tests and copy-pasted the flowgraph wiring; the data now lives
    in one class constant and the wiring in one private helper.  Public test
    method names and behavior are unchanged.
    """

    # Reference 32-point FFT of the first 64 primes packed as complex samples
    # (re, im) = (primes[2i], primes[2i+1]).  Used as the expected output of
    # the forward transform and (scaled by 1/N) as input to the inverse one.
    FFT_OF_PRIME_INPUT = ((4377+4516j),
                          (-1706.1268310546875+1638.4256591796875j),
                          (-915.2083740234375+660.69427490234375j),
                          (-660.370361328125+381.59600830078125j),
                          (-499.96044921875+238.41630554199219j),
                          (-462.26748657226562+152.88948059082031j),
                          (-377.98440551757812+77.5928955078125j),
                          (-346.85821533203125+47.152004241943359j),
                          (-295+20j),
                          (-286.33609008789062-22.257017135620117j),
                          (-271.52999877929688-33.081821441650391j),
                          (-224.6358642578125-67.019538879394531j),
                          (-244.24473571777344-91.524826049804688j),
                          (-203.09068298339844-108.54627227783203j),
                          (-198.45195007324219-115.90768432617188j),
                          (-182.97744750976562-128.12318420410156j),
                          (-167-180j),
                          (-130.33688354492188-173.83778381347656j),
                          (-141.19784545898438-190.28807067871094j),
                          (-111.09677124023438-214.48896789550781j),
                          (-70.039543151855469-242.41630554199219j),
                          (-68.960540771484375-228.30015563964844j),
                          (-53.049201965332031-291.47097778320312j),
                          (-28.695289611816406-317.64553833007812j),
                          (57-300j),
                          (45.301143646240234-335.69509887695312j),
                          (91.936195373535156-373.32437133789062j),
                          (172.09465026855469-439.275146484375j),
                          (242.24473571777344-504.47515869140625j),
                          (387.81732177734375-666.6788330078125j),
                          (689.48553466796875-918.2142333984375j),
                          (1646.539306640625-1694.1956787109375j))

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def assert_fft_ok2(self, expected_result, result_data):
        """Compare complex tuples with mixed absolute/relative tolerance."""
        # Trim in case the flowgraph produced fewer items than expected.
        expected_result = expected_result[:len(result_data)]
        self.assertComplexTuplesAlmostEqual2 (expected_result, result_data,
                                              abs_eps=1e-9, rel_eps=4e-4)

    def assert_fft_float_ok2(self, expected_result, result_data, abs_eps=1e-9, rel_eps=4e-4):
        """Compare float tuples with mixed absolute/relative tolerance."""
        expected_result = expected_result[:len(result_data)]
        self.assertFloatTuplesAlmostEqual2 (expected_result, result_data,
                                            abs_eps, rel_eps)

    def _run_fft_vcc(self, src_data, fft_size, forward, nthreads=None):
        """Run src_data through source -> fft_vcc -> sink; return sink data.

        When nthreads is None the fft_vcc block is built with its original
        4-argument signature, matching the single-threaded tests exactly.
        """
        tb = gr.top_block()
        if nthreads is None:
            op = fft.fft_vcc(fft_size, forward, [], False)
        else:
            op = fft.fft_vcc(fft_size, forward, [], False, nthreads)
        src = gr.vector_source_c(src_data)
        s2v = gr.stream_to_vector(gr.sizeof_gr_complex, fft_size)
        v2s = gr.vector_to_stream(gr.sizeof_gr_complex, fft_size)
        dst = gr.vector_sink_c()
        tb.connect(src, s2v, op, v2s, dst)
        tb.run()
        return dst.data()

    def test_001(self):
        # Forward FFT of the prime-valued input must match the reference.
        fft_size = 32
        src_data = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)])
        result_data = self._run_fft_vcc(src_data, fft_size, True)
        self.assert_fft_ok2(self.FFT_OF_PRIME_INPUT, result_data)

    def test_002(self):
        # Inverse FFT of the (1/N-scaled) reference must recover the primes.
        fft_size = 32
        src_data = tuple([x/fft_size for x in self.FFT_OF_PRIME_INPUT])
        expected_result = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)])
        result_data = self._run_fft_vcc(src_data, fft_size, False)
        self.assert_fft_ok2(expected_result, result_data)

    def test_003(self):
        # Same as test_002, but exercising the multi-threaded FFT path.
        fft_size = 32
        src_data = tuple([x/fft_size for x in self.FFT_OF_PRIME_INPUT])
        expected_result = tuple([complex(primes[2*i], primes[2*i+1]) for i in range(fft_size)])
        result_data = self._run_fft_vcc(src_data, fft_size, False, nthreads=2)
        self.assert_fft_ok2(expected_result, result_data)
if __name__ == '__main__':
    # Run under GNU Radio's unittest harness, writing XML results.
    gr_unittest.run(test_fft, "test_fft.xml")
|
gnu-sandhi/sandhi
|
modules/gr36/gr-fft/python/qa_fft.py
|
Python
|
gpl-3.0
| 9,998
|
"""
This module defines the mpf, mpc classes, and standard functions for
operating with them.
"""
__docformat__ = 'plaintext'
import re
from .ctx_base import StandardBaseContext
from .libmp.backend import basestring, BACKEND
from . import libmp
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount, to_fixed,
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div,
mpf_pow,
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
mpf_glaisher, mpf_twinprime, mpf_mertens,
int_types)
from . import function_docs
from . import rational
# Shortcut used by the number constructors to allocate instances
# without invoking __init__.
new = object.__new__

# Parses a complex literal such as '1.5+2.25j', '(3j)' or '-2e-3j'.
# The named groups 're' and 'im' hold the real and imaginary parts;
# either group may be absent (None) for a malformed or partial string.
get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?)??'
                         r'(?P<im>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?j)?\)?$')
if BACKEND == 'sage':
    # Use Sage's compiled (Cython) context implementation when running
    # inside Sage; it provides the same interface at higher speed.
    from sage.libs.mpmath.ext_main import Context as BaseMPContext
    # pickle hack
    import sage.libs.mpmath.ext_main as _mpf_module
else:
    # Pure-Python fallback implementation.
    from .ctx_mp_python import PythonMPContext as BaseMPContext
    from . import ctx_mp_python as _mpf_module

# The raw number types are needed at module level in both backends.
from .ctx_mp_python import _mpf, _mpc, mpnumeric
class MPContext(BaseMPContext, StandardBaseContext):
    """
    Context for multiprecision arithmetic with a global precision.

    Combines the backend-specific number implementation
    (BaseMPContext) with the generic high-level function library
    (StandardBaseContext).
    """
def __init__(ctx):
    """
    Set up the context: backend state, default precision, the builtin
    constants/functions, aliases, and docstring patching.

    Note: the initialization order matters — ctx.types and ctx._mpq
    must exist before default() and StandardBaseContext.__init__ run.
    """
    BaseMPContext.__init__(ctx)
    ctx.trap_complex = False    # raise on complex results from real input?
    ctx.pretty = False          # plain repr() by default
    ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
    ctx._mpq = rational.mpq
    ctx.default()
    StandardBaseContext.__init__(ctx)
    ctx.mpq = rational.mpq
    ctx.init_builtins()
    # Cache of compiled hypergeometric summators, keyed by
    # (p, q, flags, 'R'/'C') — see hypsum().
    ctx.hyp_summators = {}
    ctx._init_aliases()
    # XXX: automate
    try:
        # Python 2: docstrings of bound methods are set via im_func
        ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
        ctx.primepi.im_func.func_doc = function_docs.primepi
        ctx.psi.im_func.func_doc = function_docs.psi
        ctx.atan2.im_func.func_doc = function_docs.atan2
    except AttributeError:
        # python 3
        ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
        ctx.primepi.__func__.func_doc = function_docs.primepi
        ctx.psi.__func__.func_doc = function_docs.psi
        ctx.atan2.__func__.func_doc = function_docs.atan2
    ctx.digamma.func_doc = function_docs.digamma
    ctx.cospi.func_doc = function_docs.cospi
    ctx.sinpi.func_doc = function_docs.sinpi
def init_builtins(ctx):
    """
    Install the standard constants and elementary/special functions
    on the context.

    Most functions are thin wrappers around the corresponding libmp
    routines, created via _wrap_libmp_function(real_impl, complex_impl).
    Fix: the original assigned ctx.atan twice (identically); the
    redundant second assignment has been removed.
    """
    mpf = ctx.mpf
    mpc = ctx.mpc
    # Exact constants
    ctx.one = ctx.make_mpf(fone)
    ctx.zero = ctx.make_mpf(fzero)
    ctx.j = ctx.make_mpc((fzero,fone))
    ctx.inf = ctx.make_mpf(finf)
    ctx.ninf = ctx.make_mpf(fninf)
    ctx.nan = ctx.make_mpf(fnan)
    # eps tracks the working precision: 2**(1-prec)
    eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
        "epsilon of working precision", "eps")
    ctx.eps = eps
    # Approximate constants
    ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
    ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
    ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
    ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
    ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
    ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
    ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
    ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
    ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
    ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
    ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
    ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
    ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
    # Standard functions
    ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
    ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
    ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
    ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
    ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
    ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
    ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
    ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
    ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
    ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
    ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
    ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
    ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
    ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
    ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
    ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
    ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
    ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
    ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
    ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
    ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
    ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
    ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
    ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
    ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
    ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
    ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
    ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
    ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
    ctx.gamma_old = ctx._wrap_libmp_function(libmp.mpf_gamma_old, libmp.mpc_gamma_old)
    ctx.fac_old = ctx.factorial_old = ctx._wrap_libmp_function(libmp.mpf_factorial_old, libmp.mpc_factorial_old)
    ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
    ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
    ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
    ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
    ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
    ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
    ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
    ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
    ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
    ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
    ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
    ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
    ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
    # Faster versions (provided by the Sage backend, when present)
    ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
    ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
    ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
    ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
    ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
def to_fixed(ctx, x, prec):
    # Delegate to the number's own fixed-point conversion: returns an
    # integer approximating x * 2**prec.
    return x.to_fixed(prec)
def hypot(ctx, x, y):
    r"""
    Return the Euclidean norm `\sqrt{x^2 + y^2}` of the vector
    `(x, y)`. Both arguments must be real.
    """
    a = ctx.convert(x)
    b = ctx.convert(y)
    prec, rounding = ctx._prec_rounding
    return ctx.make_mpf(libmp.mpf_hypot(a._mpf_, b._mpf_, prec, rounding))
def _gamma_upper_int(ctx, n, z):
    """
    Upper incomplete gamma function Gamma(n, z) for integer n,
    computed via the libmp exponential-integral routine.
    """
    order = int(ctx._re(n))
    if order == 0:
        return ctx.e1(z)
    if not hasattr(z, '_mpf_'):
        raise NotImplementedError
    prec, rnd = ctx._prec_rounding
    re_part, im_part = libmp.mpf_expint(order, z._mpf_, prec, rnd, gamma=True)
    # libmp returns imag=None when the result is purely real
    if im_part is None:
        return ctx.make_mpf(re_part)
    return ctx.make_mpc((re_part, im_part))
def _expint_int(ctx, n, z):
    """
    Generalized exponential integral E_n(z) for integer n, using the
    libmp exponential-integral routine.
    """
    order = int(n)
    if order == 1:
        return ctx.e1(z)
    if not hasattr(z, '_mpf_'):
        raise NotImplementedError
    prec, rnd = ctx._prec_rounding
    re_part, im_part = libmp.mpf_expint(order, z._mpf_, prec, rnd)
    # libmp returns imag=None when the result is purely real
    if im_part is None:
        return ctx.make_mpf(re_part)
    return ctx.make_mpc((re_part, im_part))
def _nthroot(ctx, x, n):
    # Principal n-th root of x. A real root is attempted first; when
    # none exists (negative x with even n), either raise (trap_complex)
    # or promote x to a complex pair and fall through to mpc_nthroot.
    if hasattr(x, '_mpf_'):
        try:
            return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
        except ComplexResult:
            if ctx.trap_complex:
                raise
            # re-enter the complex branch below with imag part zero
            x = (x._mpf_, libmp.fzero)
    else:
        x = x._mpc_
    return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
def _besselj(ctx, n, z):
    """
    Bessel function of the first kind J_n(z) for integer order n.

    Fix: previously the function silently returned None when *z* was
    neither an mpf nor an mpc; it now raises TypeError so the error
    surfaces at the call site instead of as a later None crash.
    """
    prec, rounding = ctx._prec_rounding
    if hasattr(z, '_mpf_'):
        return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
    elif hasattr(z, '_mpc_'):
        return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
    raise TypeError("_besselj() argument must be an mpf or mpc")
def _agm(ctx, a, b=1):
    # Arithmetic-geometric mean of a and b. The fast real path is tried
    # first; on ComplexResult (e.g. mixed signs) both operands are
    # promoted to complex pairs and mpc_agm is used.
    prec, rounding = ctx._prec_rounding
    if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
        try:
            v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
            return ctx.make_mpf(v)
        except ComplexResult:
            # fall through to the complex computation below
            pass
    if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
    else: a = a._mpc_
    if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
    else: b = b._mpc_
    return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
def bernoulli(ctx, n):
    """Return the Bernoulli number B_n as an mpf."""
    prec, rnd = ctx._prec_rounding
    return ctx.make_mpf(libmp.mpf_bernoulli(int(n), prec, rnd))
def _zeta_int(ctx, n):
    """Riemann zeta function evaluated at an integer argument."""
    prec, rnd = ctx._prec_rounding
    return ctx.make_mpf(libmp.mpf_zeta_int(int(n), prec, rnd))
def atan2(ctx, y, x):
    """
    Quadrant-aware arctangent atan2(y, x); both arguments are
    converted to real mpf values.
    """
    xv = ctx.convert(x)
    yv = ctx.convert(y)
    prec, rnd = ctx._prec_rounding
    return ctx.make_mpf(libmp.mpf_atan2(yv._mpf_, xv._mpf_, prec, rnd))
def psi(ctx, m, z):
    """Polygamma function psi^(m)(z) of integer order m."""
    w = ctx.convert(z)
    order = int(m)
    prec, rnd = ctx._prec_rounding
    if ctx._is_real_type(w):
        return ctx.make_mpf(libmp.mpf_psi(order, w._mpf_, prec, rnd))
    return ctx.make_mpc(libmp.mpc_psi(order, w._mpc_, prec, rnd))
def cos_sin(ctx, x, **kwargs):
    """
    Compute (cos(x), sin(x)) simultaneously, sharing work between the
    two evaluations. Accepts the same prec/dps/rounding keyword
    arguments as fadd() and friends.
    """
    if type(x) not in ctx.types:
        x = ctx.convert(x)
    prec, rounding = ctx._parse_prec(kwargs)
    if hasattr(x, '_mpf_'):
        cos_val, sin_val = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
        return ctx.make_mpf(cos_val), ctx.make_mpf(sin_val)
    if hasattr(x, '_mpc_'):
        cos_val, sin_val = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
        return ctx.make_mpc(cos_val), ctx.make_mpc(sin_val)
    # Unrecognized type: evaluate separately through the generic path
    return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def cospi_sinpi(ctx, x, **kwargs):
    """
    Compute (cos(pi*x), sin(pi*x)) simultaneously, more accurately
    than multiplying by pi and calling cos_sin().

    Fix: the fallback branch for arguments that convert to neither
    mpf nor mpc previously returned (cos(x), sin(x)) instead of
    (cospi(x), sinpi(x)).
    """
    if type(x) not in ctx.types:
        x = ctx.convert(x)
    prec, rounding = ctx._parse_prec(kwargs)
    if hasattr(x, '_mpf_'):
        c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
        return ctx.make_mpf(c), ctx.make_mpf(s)
    elif hasattr(x, '_mpc_'):
        c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
        return ctx.make_mpc(c), ctx.make_mpc(s)
    else:
        return ctx.cospi(x, **kwargs), ctx.sinpi(x, **kwargs)
def clone(ctx):
    """
    Return a fresh context of the same class, initialized with the
    same working precision as this one.
    """
    duplicate = ctx.__class__()
    duplicate.prec = ctx.prec
    return duplicate
# Several helper methods
# TODO: add more of these, make consistent, write docstrings, ...
def _is_real_type(ctx, x):
    """True unless *x* carries a complex representation."""
    return not (hasattr(x, '_mpc_') or type(x) is complex)
def _is_complex_type(ctx, x):
    """True if *x* carries a complex representation."""
    return hasattr(x, '_mpc_') or type(x) is complex
def isnan(ctx, x):
    """
    Return *True* if *x* is a NaN (not-a-number), or for a complex
    number, whether either the real or complex part is NaN;
    otherwise return *False*::
    >>> from sympy.mpmath import *
    >>> isnan(3.14)
    False
    >>> isnan(nan)
    True
    >>> isnan(mpc(3.14,2.72))
    False
    >>> isnan(mpc(3.14,nan))
    True
    """
    if hasattr(x, "_mpf_"):
        return x._mpf_ == fnan
    if hasattr(x, "_mpc_"):
        # complex is NaN if either component equals the fnan tuple
        return fnan in x._mpc_
    if isinstance(x, int_types) or isinstance(x, rational.mpq):
        # exact integers/rationals can never be NaN
        return False
    x = ctx.convert(x)
    if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
        # retry with the converted mpf/mpc value
        return ctx.isnan(x)
    raise TypeError("isnan() needs a number as input")
def isfinite(ctx, x):
    """
    Return *True* if *x* is a finite number, i.e. neither
    an infinity or a NaN.
    >>> from sympy.mpmath import *
    >>> isfinite(inf)
    False
    >>> isfinite(-inf)
    False
    >>> isfinite(3)
    True
    >>> isfinite(nan)
    False
    >>> isfinite(3+4j)
    True
    >>> isfinite(mpc(3,inf))
    False
    >>> isfinite(mpc(nan,3))
    False
    """
    # Finite means neither infinite nor NaN; delegate both checks.
    if ctx.isinf(x) or ctx.isnan(x):
        return False
    return True
def isnpint(ctx, x):
    """
    Determine if *x* is a nonpositive integer.
    """
    if not x:
        # zero counts as a nonpositive integer
        return True
    if hasattr(x, '_mpf_'):
        sign, man, exp, bc = x._mpf_
        # negative (sign bit set) with no fractional bits (exp >= 0)
        return sign and exp >= 0
    if hasattr(x, '_mpc_'):
        # must be purely real with a nonpositive-integer real part
        return not x.imag and ctx.isnpint(x.real)
    if type(x) in int_types:
        return x <= 0
    if isinstance(x, ctx.mpq):
        p, q = x._mpq_
        if not p:
            return True
        # integer iff denominator is 1
        return q == 1 and p <= 0
    return ctx.isnpint(ctx.convert(x))
def __str__(ctx):
    """Render the current context settings as a short report."""
    pad = lambda text: text.ljust(30)
    report = [
        "Mpmath settings:",
        pad("  mp.prec = %s" % ctx.prec) + "[default: 53]",
        pad("  mp.dps = %s" % ctx.dps) + "[default: 15]",
        pad("  mp.trap_complex = %s" % ctx.trap_complex) + "[default: False]",
    ]
    return "\n".join(report)
@property
def _repr_digits(ctx):
    # Decimal digits used by repr(): slightly more than the working
    # dps so that repr -> eval round-trips exactly.
    return repr_dps(ctx._prec)
@property
def _str_digits(ctx):
    # Decimal digits used by str(): the current working dps.
    return ctx._dps
def extraprec(ctx, n, normalize_output=False):
    """
    The block
    with extraprec(n):
        <code>
    increases the precision n bits, executes <code>, and then
    restores the precision.
    extraprec(n)(f) returns a decorated version of the function f
    that increases the working precision by n bits before execution,
    and restores the parent precision afterwards. With
    normalize_output=True, it rounds the return value to the parent
    precision.
    """
    # PrecisionManager works both as context manager and decorator.
    return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
def extradps(ctx, n, normalize_output=False):
    """
    This function is analogous to extraprec (see documentation)
    but changes the decimal precision instead of the number of bits.
    """
    # Note: second callable adjusts dps rather than prec.
    return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
def workprec(ctx, n, normalize_output=False):
    """
    The block
    with workprec(n):
        <code>
    sets the precision to n bits, executes <code>, and then restores
    the precision.
    workprec(n)(f) returns a decorated version of the function f
    that sets the precision to n bits before execution,
    and restores the precision afterwards. With normalize_output=True,
    it rounds the return value to the parent precision.
    """
    # Unlike extraprec, the precision is set absolutely, not relatively.
    return PrecisionManager(ctx, lambda p: n, None, normalize_output)
def workdps(ctx, n, normalize_output=False):
    """
    This function is analogous to workprec (see documentation)
    but changes the decimal precision instead of the number of bits.
    """
    # Absolute dps, applied via the dps-adjusting slot.
    return PrecisionManager(ctx, None, lambda d: n, normalize_output)
def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
    """
    Return a wrapped copy of *f* that repeatedly evaluates *f*
    with increasing precision until the result converges to the
    full precision used at the point of the call.
    This heuristically protects against rounding errors, at the cost of
    roughly a 2x slowdown compared to manually setting the optimal
    precision. This method can, however, easily be fooled if the results
    from *f* depend "discontinuously" on the precision, for instance
    if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
    should be used judiciously.
    **Examples**
    Many functions are sensitive to perturbations of the input arguments.
    If the arguments are decimal numbers, they may have to be converted
    to binary at a much higher precision. If the amount of required
    extra precision is unknown, :func:`~mpmath.autoprec` is convenient::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15
    >>> mp.pretty = True
    >>> besselj(5, 125 * 10**28) # Exact input
    -8.03284785591801e-17
    >>> besselj(5, '1.25e30') # Bad
    7.12954868316652e-16
    >>> autoprec(besselj)(5, '1.25e30') # Good
    -8.03284785591801e-17
    The following fails to converge because `\sin(\pi) = 0` whereas all
    finite-precision approximations of `\pi` give nonzero values::
    >>> autoprec(sin)(pi)
    Traceback (most recent call last):
    ...
    NoConvergence: autoprec: prec increased to 2910 without convergence
    As the following example shows, :func:`~mpmath.autoprec` can protect against
    cancellation, but is fooled by too severe cancellation::
    >>> x = 1e-10
    >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
    1.00000008274037e-10
    1.00000000005e-10
    1.00000000005e-10
    >>> x = 1e-50
    >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
    0.0
    1.0e-50
    0.0
    With *catch*, an exception or list of exceptions to intercept
    may be specified. The raised exception is interpreted
    as signaling insufficient precision. This permits, for example,
    evaluating a function where a too low precision results in a
    division by zero::
    >>> f = lambda x: 1/(exp(x)-1)
    >>> f(1e-30)
    Traceback (most recent call last):
    ...
    ZeroDivisionError
    >>> autoprec(f, catch=ZeroDivisionError)(1e-30)
    1.0e+30
    """
    def f_autoprec_wrapped(*args, **kwargs):
        # prec is the caller's target precision; it is always restored.
        prec = ctx.prec
        if maxprec is None:
            maxprec2 = ctx._default_hyper_maxprec(prec)
        else:
            maxprec2 = maxprec
        try:
            # First evaluation slightly above the target precision
            ctx.prec = prec + 10
            try:
                v1 = f(*args, **kwargs)
            except catch:
                # caught exception == "not enough precision yet"
                v1 = ctx.nan
            prec2 = prec + 20
            while 1:
                ctx.prec = prec2
                try:
                    v2 = f(*args, **kwargs)
                except catch:
                    v2 = ctx.nan
                if v1 == v2:
                    break
                # err estimates log2 of the relative difference between
                # successive evaluations; converged when below -prec.
                err = ctx.mag(v2-v1) - ctx.mag(v2)
                if err < (-prec):
                    break
                if verbose:
                    print("autoprec: target=%s, prec=%s, accuracy=%s" \
                        % (prec, prec2, -err))
                v1 = v2
                if prec2 >= maxprec2:
                    raise ctx.NoConvergence(\
                        "autoprec: prec increased to %i without convergence"\
                        % prec2)
                prec2 += int(prec2*2)
                prec2 = min(prec2, maxprec2)
        finally:
            # always restore the caller's precision
            ctx.prec = prec
        # unary plus rounds the result to the restored precision
        return +v2
    return f_autoprec_wrapped
def nstr(ctx, x, n=6, **kwargs):
    """
    Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
    significant digits. The small default value for *n* is chosen to
    make this function useful for printing collections of numbers
    (lists, matrices, etc).
    If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
    to each element. For unrecognized classes, :func:`~mpmath.nstr`
    simply returns ``str(x)``.
    The companion function :func:`~mpmath.nprint` prints the result
    instead of returning it.
    >>> from sympy.mpmath import *
    >>> nstr([+pi, ldexp(1,-500)])
    '[3.14159, 3.05494e-151]'
    >>> nprint([+pi, ldexp(1,-500)])
    [3.14159, 3.05494e-151]
    """
    # Containers: recurse element-wise, preserving bracket style.
    if isinstance(x, list):
        return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
    if isinstance(x, tuple):
        return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
    if hasattr(x, '_mpf_'):
        return to_str(x._mpf_, n, **kwargs)
    if hasattr(x, '_mpc_'):
        return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
    if isinstance(x, basestring):
        # quote strings so they are distinguishable from numbers
        return repr(x)
    if isinstance(x, ctx.matrix):
        return x.__nstr__(n, **kwargs)
    return str(x)
def _convert_fallback(ctx, x, strings):
    """
    Last-resort conversion hook: parse complex string literals
    (when *strings* is true) and zero-width intervals; raise for
    everything else.

    Fix: a string containing 'j' that does not match the complex
    literal regex previously crashed with AttributeError on the None
    match object; it now raises a descriptive ValueError.
    """
    if strings and isinstance(x, basestring):
        if 'j' in x.lower():
            x = x.lower().replace(' ', '')
            match = get_complex.match(x)
            if match is None or match.group('im') is None:
                raise ValueError("cannot create mpc from string %r" % (x,))
            re = match.group('re')
            if not re:
                # missing real part means purely imaginary
                re = 0
            im = match.group('im').rstrip('j')
            return ctx.mpc(ctx.convert(re), ctx.convert(im))
    if hasattr(x, "_mpi_"):
        a, b = x._mpi_
        if a == b:
            return ctx.make_mpf(a)
        else:
            raise ValueError("can only create mpf from zero-width interval")
    raise TypeError("cannot create mpf from " + repr(x))
def mpmathify(ctx, *args, **kwargs):
    """Public alias for :func:`convert`: coerce the argument to an mpmath number."""
    return ctx.convert(*args, **kwargs)
def _parse_prec(ctx, kwargs):
    # Resolve (prec, rounding) from keyword arguments, falling back to
    # the context defaults. The pair (0, 'f') is the sentinel meaning
    # "exact": no rounding is performed by the libmp routines.
    if kwargs:
        if kwargs.get('exact'):
            return 0, 'f'
        prec, rounding = ctx._prec_rounding
        if 'rounding' in kwargs:
            rounding = kwargs['rounding']
        if 'prec' in kwargs:
            prec = kwargs['prec']
            if prec == ctx.inf:
                return 0, 'f'
            else:
                prec = int(prec)
        elif 'dps' in kwargs:
            dps = kwargs['dps']
            if dps == ctx.inf:
                return 0, 'f'
            prec = dps_to_prec(dps)
        return prec, rounding
    return ctx._prec_rounding
# Message raised when an exact (infinite-precision) operation overflows.
_exact_overflow_msg = "the exact result does not fit in memory"
# Template for hypsum()'s convergence failure; filled with the target
# accuracy and the working precision, both in bits.
_hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
    """
    Evaluate the hypergeometric series with *p* numerator and *q*
    denominator parameters (*coeffs*, typed per-entry by *flags*) at
    the point *z*, adaptively raising the working precision until the
    result is accurate or ``maxprec`` is exceeded.

    Fix: *z* of a type other than mpf/mpc previously fell through and
    crashed later with NameError (``v`` unbound); it now raises
    TypeError immediately.
    """
    if hasattr(z, "_mpf_"):
        key = p, q, flags, 'R'
        v = z._mpf_
    elif hasattr(z, "_mpc_"):
        key = p, q, flags, 'C'
        v = z._mpc_
    else:
        raise TypeError("hypsum() argument z must be an mpf or mpc")
    # Summators are code-generated per signature and cached.
    if key not in ctx.hyp_summators:
        ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
    summator = ctx.hyp_summators[key]
    prec = ctx.prec
    maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
    extraprec = 50
    epsshift = 25
    # Jumps in magnitude occur when parameters are close to negative
    # integers. We must ensure that these terms are included in
    # the sum and added accurately
    magnitude_check = {}
    max_total_jump = 0
    for i, c in enumerate(coeffs):
        if flags[i] == 'Z':
            if i >= p and c <= 0:
                ok = False
                for ii, cc in enumerate(coeffs[:p]):
                    # Note: c <= cc or c < cc, depending on convention
                    if flags[ii] == 'Z' and cc <= 0 and c <= cc:
                        ok = True
                if not ok:
                    raise ZeroDivisionError("pole in hypergeometric series")
            continue
        n, d = ctx.nint_distance(c)
        n = -int(n)
        d = -d
        if i >= p and n >= 0 and d > 4:
            if n in magnitude_check:
                magnitude_check[n] += d
            else:
                magnitude_check[n] = d
            extraprec = max(extraprec, d - prec + 60)
        max_total_jump += abs(d)
    while 1:
        if extraprec > maxprec:
            raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
        wp = prec + extraprec
        if magnitude_check:
            mag_dict = dict((n,None) for n in magnitude_check)
        else:
            mag_dict = {}
        zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
            epsshift, mag_dict, **kwargs)
        cancel = -magnitude
        jumps_resolved = True
        if extraprec < max_total_jump:
            for n in mag_dict.values():
                if (n is None) or (n < prec):
                    jumps_resolved = False
                    break
        accurate = (cancel < extraprec-25-5 or not accurate_small)
        if jumps_resolved:
            if accurate:
                break
            # zero?
            zeroprec = kwargs.get('zeroprec')
            if zeroprec is not None:
                if cancel > zeroprec:
                    if have_complex:
                        return ctx.mpc(0)
                    else:
                        return ctx.zero
        # Some near-singularities were not included, so increase
        # precision and repeat until they are
        extraprec *= 2
        # Possible workaround for bad roundoff in fixed-point arithmetic
        epsshift += 5
        extraprec += 5
    if type(zv) is tuple:
        if have_complex:
            return ctx.make_mpc(zv)
        else:
            return ctx.make_mpf(zv)
    else:
        return zv
def ldexp(ctx, x, n):
    r"""
    Computes `x 2^n` efficiently. No rounding is performed.
    The argument `x` must be a real floating-point number (or
    possible to convert into one) and `n` must be a Python ``int``.
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> ldexp(1, 10)
    mpf('1024.0')
    >>> ldexp(1, -3)
    mpf('0.125')
    """
    x = ctx.convert(x)
    # mpf_shift only adjusts the exponent, hence exact and cheap
    return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
def frexp(ctx, x):
    r"""
    Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
    `n` a Python integer, and such that `x = y 2^n`. No rounding is
    performed.
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> frexp(7.5)
    (mpf('0.9375'), 3)
    """
    x = ctx.convert(x)
    y, n = libmp.mpf_frexp(x._mpf_)
    return ctx.make_mpf(y), n
def fneg(ctx, x, **kwargs):
    """
    Negates the number *x*, giving a floating-point result, optionally
    using a custom precision and rounding mode.
    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.
    **Examples**
    An mpmath number is returned::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> fneg(2.5)
    mpf('-2.5')
    >>> fneg(-5+2j)
    mpc(real='5.0', imag='-2.0')
    Precise control over rounding is possible::
    >>> x = fadd(2, 1e-100, exact=True)
    >>> fneg(x)
    mpf('-2.0')
    >>> fneg(x, rounding='f')
    mpf('-2.0000000000000004')
    Negating with and without roundoff::
    >>> n = 200000000000000000000001
    >>> print(int(-mpf(n)))
    -200000000000000016777216
    >>> print(int(fneg(n)))
    -200000000000000016777216
    >>> print(int(fneg(n, prec=log(n,2)+1)))
    -200000000000000000000001
    >>> print(int(fneg(n, dps=log(n,10)+1)))
    -200000000000000000000001
    >>> print(int(fneg(n, prec=inf)))
    -200000000000000000000001
    >>> print(int(fneg(n, dps=inf)))
    -200000000000000000000001
    >>> print(int(fneg(n, exact=True)))
    -200000000000000000000001
    """
    # prec == 0 means exact (no rounding); see _parse_prec.
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    if hasattr(x, '_mpf_'):
        return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
    if hasattr(x, '_mpc_'):
        return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fadd(ctx, x, y, **kwargs):
    """
    Adds the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.
    The default precision is the working precision of the context.
    You can specify a custom precision in bits by passing the *prec* keyword
    argument, or by providing an equivalent decimal precision with the *dps*
    keyword argument. If the precision is set to ``+inf``, or if the flag
    *exact=True* is passed, an exact addition with no rounding is performed.
    When the precision is finite, the optional *rounding* keyword argument
    specifies the direction of rounding. Valid options are ``'n'`` for
    nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
    for down, ``'u'`` for up.
    **Examples**
    Using :func:`~mpmath.fadd` with precision and rounding control::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> fadd(2, 1e-20)
    mpf('2.0')
    >>> fadd(2, 1e-20, rounding='u')
    mpf('2.0000000000000004')
    >>> nprint(fadd(2, 1e-20, prec=100), 25)
    2.00000000000000000001
    >>> nprint(fadd(2, 1e-20, dps=15), 25)
    2.0
    >>> nprint(fadd(2, 1e-20, dps=25), 25)
    2.00000000000000000001
    >>> nprint(fadd(2, 1e-20, exact=True), 25)
    2.00000000000000000001
    Exact addition avoids cancellation errors, enforcing familiar laws
    of numbers such as `x+y-x = y`, which don't hold in floating-point
    arithmetic with finite precision::
    >>> x, y = mpf(2), mpf('1e-1000')
    >>> print(x + y - x)
    0.0
    >>> print(fadd(x, y, prec=inf) - x)
    1.0e-1000
    >>> print(fadd(x, y, exact=True) - x)
    1.0e-1000
    Exact addition can be inefficient and may be impossible to perform
    with large magnitude differences::
    >>> fadd(1, '1e-100000000000000000000', prec=inf)
    Traceback (most recent call last):
    ...
    OverflowError: the exact result does not fit in memory
    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        # Dispatch on the (real, complex) type combination; addition is
        # commutative so mpf+mpc reuses mpc_add_mpf with swapped args.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # exact mode can overflow memory for huge magnitude gaps
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fsub(ctx, x, y, **kwargs):
    """
    Subtracts the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.
    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.
    **Examples**
    Using :func:`~mpmath.fsub` with precision and rounding control::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> fsub(2, 1e-20)
    mpf('2.0')
    >>> fsub(2, 1e-20, rounding='d')
    mpf('1.9999999999999998')
    >>> nprint(fsub(2, 1e-20, prec=100), 25)
    1.99999999999999999999
    >>> nprint(fsub(2, 1e-20, dps=15), 25)
    2.0
    >>> nprint(fsub(2, 1e-20, dps=25), 25)
    1.99999999999999999999
    >>> nprint(fsub(2, 1e-20, exact=True), 25)
    1.99999999999999999999
    Exact subtraction avoids cancellation errors, enforcing familiar laws
    of numbers such as `x-y+y = x`, which don't hold in floating-point
    arithmetic with finite precision::
    >>> x, y = mpf(2), mpf('1e1000')
    >>> print(x - y + y)
    0.0
    >>> print(fsub(x, y, prec=inf) + y)
    2.0
    >>> print(fsub(x, y, exact=True) + y)
    2.0
    Exact addition can be inefficient and may be impossible to perform
    with large magnitude differences::
    >>> fsub(1, '1e-100000000000000000000', prec=inf)
    Traceback (most recent call last):
    ...
    OverflowError: the exact result does not fit in memory
    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                # no mpc_rsub_mpf helper: promote x to a complex pair
                return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # exact mode can overflow memory for huge magnitude gaps
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fmul(ctx, x, y, **kwargs):
    """
    Multiplies the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.
    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.
    **Examples**
    The result is an mpmath number::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> fmul(2, 5.0)
    mpf('10.0')
    >>> fmul(0.5j, 0.5)
    mpc(real='0.0', imag='0.25')
    Avoiding roundoff::
    >>> x, y = 10**10+1, 10**15+1
    >>> print(x*y)
    10000000001000010000000001
    >>> print(mpf(x) * mpf(y))
    1.0000000001e+25
    >>> print(int(mpf(x) * mpf(y)))
    10000000001000011026399232
    >>> print(int(fmul(x, y)))
    10000000001000011026399232
    >>> print(int(fmul(x, y, dps=25)))
    10000000001000010000000001
    >>> print(int(fmul(x, y, exact=True)))
    10000000001000010000000001
    Exact multiplication with complex numbers can be inefficient and may
    be impossible to perform with large magnitude differences between
    real and imaginary parts::
    >>> x = 1+2j
    >>> y = mpc(2, '1e-100000000000000000000')
    >>> fmul(x, y)
    mpc(real='2.0', imag='4.0')
    >>> fmul(x, y, rounding='u')
    mpc(real='2.0', imag='4.0000000000000009')
    >>> fmul(x, y, exact=True)
    Traceback (most recent call last):
    ...
    OverflowError: the exact result does not fit in memory
    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        # Multiplication is commutative, so mpf*mpc reuses mpc_mul_mpf.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # exact mode can overflow memory for huge magnitude gaps
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fdiv(ctx, x, y, **kwargs):
    """
    Divides the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.
    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.
    **Examples**
    The result is an mpmath number::
    >>> from sympy.mpmath import *
    >>> mp.dps = 15; mp.pretty = False
    >>> fdiv(3, 2)
    mpf('1.5')
    >>> fdiv(2, 3)
    mpf('0.66666666666666663')
    >>> fdiv(2+4j, 0.5)
    mpc(real='4.0', imag='8.0')
    The rounding direction and precision can be controlled::
    >>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits
    mpf('0.6666259765625')
    >>> fdiv(2, 3, rounding='d')
    mpf('0.66666666666666663')
    >>> fdiv(2, 3, prec=60)
    mpf('0.66666666666666667')
    >>> fdiv(2, 3, rounding='u')
    mpf('0.66666666666666674')
    Checking the error of a division by performing it at higher precision::
    >>> fdiv(2, 3) - fdiv(2, 3, prec=100)
    mpf('-3.7007434154172148e-17')
    Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
    allowed since the quotient of two floating-point numbers generally
    does not have an exact floating-point representation. (In the
    future this might be changed to allow the case where the division
    is actually exact.)
    >>> fdiv(2, 3, exact=True)
    Traceback (most recent call last):
    ...
    ValueError: division is not an exact operation
    """
    prec, rounding = ctx._parse_prec(kwargs)
    if not prec:
        # prec == 0 is the "exact" sentinel; quotients are generally
        # not exactly representable, so this is disallowed.
        raise ValueError("division is not an exact operation")
    x = ctx.convert(x)
    y = ctx.convert(y)
    if hasattr(x, '_mpf_'):
        if hasattr(y, '_mpf_'):
            return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
        if hasattr(y, '_mpc_'):
            # promote the real numerator to a complex pair
            return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
    if hasattr(x, '_mpc_'):
        if hasattr(y, '_mpf_'):
            return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
        if hasattr(y, '_mpc_'):
            return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def nint_distance(ctx, x):
    r"""
    Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
    an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
    (measured in bits) lost to cancellation when computing `x-n`.

        >>> from sympy.mpmath import *
        >>> n, d = nint_distance(5)
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5))
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5.00000001))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpf(4.99999999))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpc(5,10))
        >>> print(n); print(d)
        5
        4
        >>> n, d = nint_distance(mpc(5,0.000001))
        >>> print(n); print(d)
        5
        -26
    """
    typx = type(x)
    if typx in int_types:
        # Exact integer: the distance is exactly zero, i.e. log2(0) = -inf.
        return int(x), ctx.ninf
    elif typx is rational.mpq:
        # Rational p/q: round the quotient to the nearest integer.
        p, q = x._mpq_
        n, r = divmod(p, q)
        if 2*r >= q:
            n += 1
        elif not r:
            # Remainder zero: x is itself an integer.
            return n, ctx.ninf
        # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
        d = bitcount(abs(p-n*q)) - bitcount(q)
        return n, d
    if hasattr(x, "_mpf_"):
        re = x._mpf_
        im_dist = ctx.ninf
    elif hasattr(x, "_mpc_"):
        re, im = x._mpc_
        isign, iman, iexp, ibc = im
        if iman:
            # Nonzero imaginary part: exponent + bitcount bounds log2(|im|),
            # and the distance to the (real) nearest integer is at least |im|.
            im_dist = iexp + ibc
        elif im == fzero:
            im_dist = ctx.ninf
        else:
            raise ValueError("requires a finite number")
    else:
        # Unrecognized type: convert to an mpmath number and retry once.
        x = ctx.convert(x)
        if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
            return ctx.nint_distance(x)
        else:
            raise TypeError("requires an mpf/mpc")
    # Work on the raw real component (sign, mantissa, exponent, bitcount).
    sign, man, exp, bc = re
    mag = exp+bc
    # |x| < 0.5
    if mag < 0:
        n = 0
        re_dist = mag
    elif man:
        # exact integer
        if exp >= 0:
            n = man << exp
            re_dist = ctx.ninf
        # exact half-integer
        elif exp == -1:
            # Value is man/2; round the magnitude up (sign applied below).
            n = (man>>1)+1
            re_dist = 0
        else:
            # General case: t = floor(2*|x|); round t to even multiple of 2,
            # leaving `man` as the numerator of the distance |x - n|.
            d = (-exp-1)
            t = man >> d
            if t & 1:
                t += 1
                man = (t<<d) - man
            else:
                man -= (t<<d)
            n = t>>1 # int(t)>>1
            re_dist = exp+bitcount(man)
        if sign:
            n = -n
    elif re == fzero:
        re_dist = ctx.ninf
        n = 0
    else:
        raise ValueError("requires a finite number")
    # The distance estimate is dominated by whichever component is farther.
    return n, max(re_dist, im_dist)
def fprod(ctx, factors):
    r"""
    Calculates a product containing a finite number of factors (for
    infinite products, see :func:`~mpmath.nprod`). The factors will be
    converted to mpmath numbers.

        >>> from sympy.mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fprod([1, 2, 0.5, 7])
        mpf('7.0')
    """
    saved_prec = ctx.prec
    try:
        product = ctx.one
        for factor in factors:
            product *= factor
    finally:
        # Restore the working precision even if iteration/conversion fails.
        ctx.prec = saved_prec
    # Unary plus rounds the accumulated result to the current precision.
    return +product
def rand(ctx):
    """
    Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
    The number of randomly generated bits in the mantissa is equal
    to the working precision.
    """
    # Delegate to the libmp generator at the context's current binary precision.
    return ctx.make_mpf(mpf_rand(ctx._prec))
def fraction(ctx, p, q):
    """
    Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
    the fraction `p/q`. The value is updated with the precision.

        >>> from sympy.mpmath import *
        >>> mp.dps = 15
        >>> a = fraction(1,100)
        >>> b = mpf(1)/100
        >>> print(a); print(b)
        0.01
        0.01
        >>> mp.dps = 30
        >>> print(a); print(b) # a will be accurate
        0.01
        0.0100000000000000002081668171172
        >>> mp.dps = 15
    """
    # ctx.constant wraps the callable so the value is recomputed from the
    # exact rational whenever it is used at a new precision (see doctest).
    return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
        '%s/%s' % (p, q))
def absmin(ctx, x):
    # Return |x| after conversion to an mpmath number.  For scalar mpf/mpc
    # values absmin and absmax coincide; presumably interval-like contexts
    # override these with genuine min/max of |x| — confirm in subclasses.
    return abs(ctx.convert(x))

def absmax(ctx, x):
    # See absmin: identical for scalar values.
    return abs(ctx.convert(x))
def _as_points(ctx, x):
    # XXX: remove this?
    # If x carries an interval representation (_mpi_), expand it into its
    # two endpoint mpfs; any other input is passed through unchanged.
    if hasattr(x, '_mpi_'):
        a, b = x._mpi_
        return [ctx.make_mpf(a), ctx.make_mpf(b)]
    return x
'''
def _zetasum(ctx, s, a, b):
"""
Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
integers.
"""
a = int(a)
b = int(b)
s = ctx.convert(s)
prec, rounding = ctx._prec_rounding
if hasattr(s, '_mpf_'):
v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
elif hasattr(s, '_mpc_'):
v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
return v
'''
def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
    # Thin wrapper over libmp.mpc_zetasum: computes partial zeta sums
    # (and, per `derivatives`/`reflect`, derivative and reflected variants)
    # starting at integer `a`; presumably `n` is the number of terms —
    # confirm against libmp.mpc_zetasum's contract.
    # NOTE(review): mutable default `derivatives=[0]` looks read-only here,
    # but verify libmp.mpc_zetasum never mutates it.
    if not (ctx.isint(a) and hasattr(s, "_mpc_")):
        raise NotImplementedError
    a = int(a)
    prec = ctx._prec
    xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
    # Wrap the raw complex tuples back into context mpc instances.
    xs = [ctx.make_mpc(x) for x in xs]
    ys = [ctx.make_mpc(y) for y in ys]
    return xs, ys
class PrecisionManager:
    """Temporarily adjust a context's working precision.

    Usable both as a decorator (``__call__``) and as a context manager
    (``with``).  Exactly one of ``precfun``/``dpsfun`` is expected:
    ``precfun`` maps the current binary precision to a new one; otherwise
    ``dpsfun`` does the same for decimal digits.  The original precision
    is always restored afterwards.
    """
    def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
        self.ctx = ctx
        # precfun: prec -> new prec (takes priority when truthy).
        self.precfun = precfun
        # dpsfun: dps -> new dps (used when precfun is falsy).
        self.dpsfun = dpsfun
        # When true, decorated results are re-rounded with unary plus.
        self.normalize_output = normalize_output
    def __call__(self, f):
        # Decorator form: run f at the adjusted precision, then restore.
        def g(*args, **kwargs):
            orig = self.ctx.prec
            try:
                if self.precfun:
                    self.ctx.prec = self.precfun(self.ctx.prec)
                else:
                    self.ctx.dps = self.dpsfun(self.ctx.dps)
                if self.normalize_output:
                    v = f(*args, **kwargs)
                    if type(v) is tuple:
                        # Re-round each element of a tuple result.
                        return tuple([+a for a in v])
                    return +v
                else:
                    return f(*args, **kwargs)
            finally:
                # Restore the caller's precision on success or error.
                self.ctx.prec = orig
        # Preserve the wrapped function's identity for introspection.
        g.__name__ = f.__name__
        g.__doc__ = f.__doc__
        return g
    def __enter__(self):
        self.origp = self.ctx.prec
        if self.precfun:
            self.ctx.prec = self.precfun(self.ctx.prec)
        else:
            self.ctx.dps = self.dpsfun(self.ctx.dps)
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore; returning False propagates any exception.
        self.ctx.prec = self.origp
        return False
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/mpmath/ctx_mp.py
|
Python
|
mit
| 48,884
|
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos l3_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
import re
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.eos.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs
class L3_interfacesFacts(object):
    """ The eos l3_interfaces fact class
    """

    def __init__(self, module, subspec='config', options='options'):
        self._module = module
        self.argument_spec = L3_interfacesArgs.argument_spec
        # Narrow the full argspec down to the subtree describing the facts
        # payload (by default argument_spec['config']['options']).
        spec = deepcopy(self.argument_spec)
        if subspec:
            if options:
                facts_argument_spec = spec[subspec][options]
            else:
                facts_argument_spec = spec[subspec]
        else:
            facts_argument_spec = spec
        # Skeleton dict with every spec key present (values empty).
        self.generated_spec = utils.generate_dict(facts_argument_spec)

    def populate_facts(self, connection, ansible_facts, data=None):
        """ Populate the facts for l3_interfaces
        :param connection: the device connection
        :param ansible_facts: Facts dictionary
        :param data: previously collected configuration
        :rtype: dictionary
        :returns: facts
        """
        if not data:
            data = connection.get('show running-config | section ^interface')

        # split the config into instances of the resource: each chunk runs
        # from one line starting with "interface" to the next.
        resource_delim = 'interface'
        find_pattern = r'(?:^|\n)%s.*?(?=(?:^|\n)%s|$)' % (resource_delim, resource_delim)
        resources = [p.strip() for p in re.findall(find_pattern, data, re.DOTALL)]

        objs = []
        for resource in resources:
            if resource:
                obj = self.render_config(self.generated_spec, resource)
                if obj:
                    objs.append(obj)

        facts = {}
        if objs:
            # Validate the parsed values against the argspec, then strip
            # empty keys before publishing.
            params = utils.validate_config(self.argument_spec, {'config': objs})
            facts['l3_interfaces'] = [utils.remove_empties(cfg) for cfg in params['config']]
        ansible_facts['ansible_network_resources'].update(facts)
        return ansible_facts

    def render_config(self, spec, conf):
        """
        Render config as dictionary structure and delete keys
        from spec for null values

        :param spec: The facts tree, generated from the argspec
        :param conf: The configuration
        :rtype: dictionary
        :returns: The generated config
        """
        config = deepcopy(spec)
        config['name'] = utils.parse_conf_arg(conf, 'interface')

        matches = re.findall(r'.*ip address (.+)$', conf, re.MULTILINE)
        if matches:
            config["ipv4"] = []
            for match in matches:
                # "A.B.C.D/len [secondary]" -> address plus optional keyword.
                address, dummy, remainder = match.partition(" ")
                ipv4 = {"address": address}
                if remainder == "secondary":
                    ipv4["secondary"] = True
                config['ipv4'].append(ipv4)

        matches = re.findall(r'.*ipv6 address (.+)$', conf, re.MULTILINE)
        if matches:
            config["ipv6"] = []
            for match in matches:
                # NOTE(review): any text after the ipv6 address is discarded;
                # unlike ipv4, "secondary" is not captured — confirm intended.
                address, dummy, remainder = match.partition(" ")
                ipv6 = {"address": address}
                config['ipv6'].append(ipv6)

        return utils.remove_empties(config)
|
Dhivyap/ansible
|
lib/ansible/module_utils/network/eos/facts/l3_interfaces/l3_interfaces.py
|
Python
|
gpl-3.0
| 3,622
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contrib hooks are not imported by default. They should be accessed
# directly: from airflow.contrib.hooks.hook_module import Hook
import sys
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinary below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Mapping of deprecated hook module name -> list of hook class names it
# provides; consumed by AirflowImporter below to lazily resolve old-style
# `from airflow.contrib.hooks import X` imports.
_hooks = {
    'ftp_hook': ['FTPHook'],
    'ftps_hook': ['FTPSHook'],
    'vertica_hook': ['VerticaHook'],
    'ssh_hook': ['SSHHook'],
    'bigquery_hook': ['BigQueryHook'],
    'qubole_hook': ['QuboleHook'],
    'gcs_hook': ['GoogleCloudStorageHook'],
    'datastore_hook': ['DatastoreHook'],
    'gcp_dataproc_hook': ['DataProcHook'],
    'gcp_dataflow_hook': ['DataFlowHook'],
    'cloudant_hook': ['CloudantHook'],
    'fs_hook': ['FSHook']
}

import os as _os
# Install the legacy importer unless AIRFLOW_USE_NEW_IMPORTS is set.
# NOTE: environ values are strings, so ANY non-empty value — including "0"
# — is truthy and disables the legacy import machinery.
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
    from airflow.utils.helpers import AirflowImporter
    airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
|
forevernull/incubator-airflow
|
airflow/contrib/hooks/__init__.py
|
Python
|
apache-2.0
| 1,660
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
from openerp import api
class account_analytic_default(osv.osv):
    _name = "account.analytic.default"
    _description = "Analytic Distribution"
    _rec_name = "analytic_id"
    _order = "sequence"
    _columns = {
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distribution"),
        'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', domain=[('account_type', '=', 'normal')]),
        'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
        'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
        'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
        'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
        'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
        'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
    }

    def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, company_id=None, context=None):
        """Return the most specific matching analytic default, or False.

        Each supplied criterion matches either the exact value OR records
        that leave the field empty (generic rules), using Odoo's
        prefix-notation domains where ['|', A, B] means A OR B.  Among the
        matches, the record with the most non-empty fields wins.
        """
        domain = []
        if product_id:
            domain += ['|', ('product_id', '=', product_id)]
        domain += [('product_id', '=', False)]
        if partner_id:
            domain += ['|', ('partner_id', '=', partner_id)]
        domain += [('partner_id', '=', False)]
        if company_id:
            domain += ['|', ('company_id', '=', company_id)]
        domain += [('company_id', '=', False)]
        if user_id:
            domain += ['|', ('user_id', '=', user_id)]
        domain += [('user_id', '=', False)]
        if date:
            # Rule applies when the date falls inside its validity window
            # (open-ended when date_start/date_stop are empty).
            domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
            domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
        best_index = -1
        res = False
        for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
            # Specificity score: one point per constrained field.
            index = 0
            if rec.product_id: index += 1
            if rec.partner_id: index += 1
            if rec.company_id: index += 1
            if rec.user_id: index += 1
            if rec.date_start: index += 1
            if rec.date_stop: index += 1
            if index > best_index:
                res = rec
                best_index = index
        return res
class account_invoice_line(osv.osv):
    _inherit = "account.invoice.line"
    _description = "Invoice Line"

    @api.onchange('product_id')
    def _onchange_product_id(self):
        # After the standard product onchange, apply the best-matching
        # analytic default (product / invoice partner / current user / today)
        # to the line's analytic account, clearing it when nothing matches.
        res = super(account_invoice_line, self)._onchange_product_id()
        rec = self.env['account.analytic.default'].account_get(self.product_id.id, self.invoice_id.partner_id.id, self._uid,
            time.strftime('%Y-%m-%d'), company_id=self.company_id.id, context=self._context)
        if rec:
            self.account_analytic_id = rec.analytic_id.id
        else:
            self.account_analytic_id = False
        return res
class stock_picking(osv.osv):
    _inherit = "stock.picking"

    def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
        # Prefer an analytic default matching the move's product and the
        # picking's partner; otherwise defer to the standard behaviour.
        partner_id = picking.partner_id and picking.partner_id.id or False
        rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'))
        if rec:
            return rec.analytic_id.id
        return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
class sale_order_line(osv.osv):
    _inherit = "sale.order.line"

    @api.multi
    def _prepare_invoice_line(self, qty):
        # Enrich the standard invoice-line values with the analytic default
        # matching the line's product, the order partner and the salesman.
        res = super(sale_order_line, self)._prepare_invoice_line(qty)
        default_analytic_account = self.env['account.analytic.default'].account_get(self.product_id.id, self.order_id.partner_id.id, self.order_id.user_id.id, time.strftime('%Y-%m-%d'))
        if default_analytic_account:
            res.update({'account_analytic_id': default_analytic_account.analytic_id.id})
        return res
class product_product(osv.Model):
    _inherit = 'product.product'

    def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
        # Functional-field compute: number of analytic-default rules
        # referencing each product, keyed by product id.
        Analytic = self.pool['account.analytic.default']
        return {
            product_id: Analytic.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
            for product_id in ids
        }

    _columns = {
        'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
    }
class product_template(osv.Model):
    _inherit = 'product.template'

    def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
        # A template's rule count is the sum over all of its variants.
        res = {}
        for product_tmpl_id in self.browse(cr, uid, ids, context=context):
            res[product_tmpl_id.id] = sum([p.rules_count for p in product_tmpl_id.product_variant_ids])
        return res

    _columns = {
        'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
    }

    def action_view_rules(self, cr, uid, ids, context=None):
        # Open the analytic-default list action filtered to this
        # template's variant products.
        products = self._get_products(cr, uid, ids, context=context)
        result = self._get_act_window_dict(cr, uid, 'account_analytic_default.action_product_default_list', context=context)
        result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
        # Remove context so it is not going to filter on product_id with active_id of template
        result['context'] = "{}"
        return result
class stock_move(osv.Model):
    _inherit = 'stock.move'

    def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        # It will set the default analytic account on the invoice line
        partner_id = self.pool['account.invoice'].browse(cr, uid, invoice_line_vals.get('invoice_id'), context=context).partner_id.id
        # Only fill in the analytic account when the caller supplied none.
        if 'account_analytic_id' not in invoice_line_vals or not invoice_line_vals.get('account_analytic_id'):
            rec = self.pool['account.analytic.default'].account_get(cr, uid, move.product_id.id, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=move.company_id.id, context=context)
            if rec:
                invoice_line_vals.update({'account_analytic_id': rec.analytic_id.id})
        res = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
        return res
|
vileopratama/vitech
|
src/addons/account_analytic_default/account_analytic_default.py
|
Python
|
mit
| 7,388
|
# Copyright Joel de Guzman 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> import pointer_vector_ext
>>> d = pointer_vector_ext.DoesSomething()
>>> lst = d.returnList()
>>> lst[0].f();
'harru'
'''
def run(args=None):
    """Run this module's doctests, optionally overriding sys.argv first."""
    import doctest
    import sys

    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
# Script entry point: run the doctests and exit with the failure count.
if __name__ == '__main__':
    print('running...')
    import sys
    status = run()[0]
    if (status == 0): print("Done.")
    sys.exit(status)
|
satya-das/common
|
third_party/boost_tp/libs/python/test/pointer_vector.py
|
Python
|
mit
| 628
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: asa_config
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Manage Cisco ASA configuration sections
description:
- Cisco ASA configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ASA configuration sections in
a deterministic way.
extends_documentation_fragment: asa
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct
required: false
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When the argument is set to
I(merge), the configuration changes are merged with the current
device running configuration. When the argument is set to I(check)
the configuration updates are determined but not actually configured
on the remote device.
required: false
default: merge
choices: ['merge', 'check']
commit:
description:
- This argument specifies the update method to use when applying the
configuration changes to the remote node. If the value is set to
I(merge) the configuration updates are merged with the running-
config. If the value is set to I(check), no changes are made to
the remote host.
required: false
default: merge
choices: ['merge', 'check']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
required: false
default: null
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
required: false
default: no
choices: ['yes', 'no']
passwords:
description:
- This argument specifies to include passwords in the config
when retrieving the running-config from the remote device. This
includes passwords related to VPN endpoints. This argument is
mutually exclusive with I(defaults).
required: false
default: no
choices: ['yes', 'no']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
transport: cli
- asa_config:
lines:
- network-object host 10.80.30.18
- network-object host 10.80.30.19
- network-object host 10.80.30.20
parents: ['object-group network OG-MONITORED-SERVERS']
provider: "{{ cli }}"
- asa_config:
host: "{{ inventory_hostname }}"
lines:
- message-length maximum client auto
- message-length maximum 512
match: line
parents: ['policy-map type inspect dns PM-DNS', 'parameters']
authorize: yes
auth_pass: cisco
username: admin
password: cisco
context: ansible
- asa_config:
lines:
- ikev1 pre-shared-key MyS3cretVPNK3y
parents: tunnel-group 1.1.1.1 ipsec-attributes
passwords: yes
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
responses:
description: The set of responses from issuing the commands on the device
returned: when not check_mode
type: list
sample: ['...', '...']
"""
import re
import ansible.module_utils.asa
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
def get_config(module):
    """Return the base configuration as a NetworkConfig object.

    Uses the user-supplied ``config`` parameter when present; otherwise
    fetches the running-config from the device, honouring the mutually
    exclusive ``defaults``/``passwords`` options.
    """
    contents = module.params['config']
    if not contents:
        include = None
        if module.params['defaults']:
            include = 'defaults'
        elif module.params['passwords']:
            include = 'passwords'
        contents = module.config.get_config(include=include)
    return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
    """Build the candidate configuration from ``src`` or ``lines``."""
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        parent_path = module.params['parents'] or list()
        candidate.add(module.params['lines'], parents=parent_path)
    return candidate
def run(module, result):
    """Diff the candidate config against the device and push updates.

    Mutates ``result`` in place: sets 'updates' with the command list and
    'changed' when anything would be (or was) applied.
    """
    match = module.params['match']
    replace = module.params['replace']
    path = module.params['parents']

    candidate = get_candidate(module)

    if match != 'none':
        config = get_config(module)
        # Only the lines differing from the base config are pushed.
        configobjs = candidate.difference(config, path=path, match=match,
                                          replace=replace)
    else:
        # match=none: push the candidate verbatim, no comparison.
        configobjs = candidate.items

    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')

        if module.params['lines']:
            # before/after wrap the generated change set without taking
            # part in the diff itself.
            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

        result['updates'] = commands

        # send the configuration commands to the device and merge
        # them with the current running config
        if not module.check_mode:
            module.config.load_config(commands)
        # 'changed' is reported even in check mode (would-change semantics).
        result['changed'] = True

    if module.params['save']:
        if not module.check_mode:
            module.config.save_config()
        result['changed'] = True
def main():
    """main entry point for module execution"""
    spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        config=dict(),
        defaults=dict(type='bool', default=False),
        passwords=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )

    # lines/src are alternative config sources; defaults/passwords select
    # mutually exclusive variants of the running-config fetch.
    exclusive = [('lines', 'src'), ('defaults', 'passwords')]

    # strict/exact matching and block replace only make sense with lines.
    conditionally_required = [
        ('match', 'strict', ['lines']),
        ('match', 'exact', ['lines']),
        ('replace', 'block', ['lines']),
    ]

    module = NetworkModule(argument_spec=spec,
                           connect_on_load=False,
                           mutually_exclusive=exclusive,
                           required_if=conditionally_required,
                           supports_check_mode=True)

    result = dict(changed=False)

    if module.params['backup']:
        # Stash the pre-change running-config for the backup feature.
        result['__backup__'] = module.config.get_config()

    try:
        run(module, result)
    except NetworkError:
        exc = get_exception()
        module.fail_json(msg=str(exc), **exc.kwargs)

    module.exit_json(**result)
# Ansible module entry point.
if __name__ == '__main__':
    main()
|
bcoca/ansible-modules-extras
|
network/asa/asa_config.py
|
Python
|
gpl-3.0
| 11,915
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients. The hadoop cli client and the
snakebite client.
"""
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import snakebite_client as hdfs_snakebite_client
from luigi.contrib.hdfs import hadoopcli_clients as hdfs_hadoopcli_clients
import luigi.contrib.target
import logging
logger = logging.getLogger('luigi-interface')
def get_autoconfig_client(show_warnings=True):
    """
    Creates the client as specified in the `luigi.cfg` configuration.

    :param show_warnings: forwarded to the config lookup so deprecation
        warnings can be suppressed (e.g. during module import below).
    :raises Exception: if the configured client name is not recognized.
    """
    configured_client = hdfs_config.get_configured_hdfs_client(show_warnings=show_warnings)
    if configured_client == "snakebite":
        return hdfs_snakebite_client.SnakebiteHdfsClient()
    if configured_client == "snakebite_with_hadoopcli_fallback":
        # Try snakebite first, falling back to the hadoop CLI client.
        return luigi.contrib.target.CascadingClient([hdfs_snakebite_client.SnakebiteHdfsClient(),
                                                     hdfs_hadoopcli_clients.create_hadoopcli_client()])
    if configured_client == "hadoopcli":
        return hdfs_hadoopcli_clients.create_hadoopcli_client()
    # Bug fix: reuse the value already read above instead of calling
    # get_configured_hdfs_client() a second time, which re-read the config
    # (possibly re-emitting warnings, and ignoring show_warnings).
    raise Exception("Unknown hdfs client " + configured_client)
# Suppress warnings so that importing luigi.contrib.hdfs doesn't show a deprecated warning.
client = get_autoconfig_client(show_warnings=False)
# Module-level convenience aliases bound to the autoconfigured client,
# so callers can use e.g. `clients.exists(path)` directly.
exists = client.exists
rename = client.rename
remove = client.remove
mkdir = client.mkdir
listdir = client.listdir
|
ChrisBeaumont/luigi
|
luigi/contrib/hdfs/clients.py
|
Python
|
apache-2.0
| 2,040
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import platform
from telemetry.core.platform import platform_backend
# Get build/android scripts into our path.
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../../build/android')))
from pylib import perf_tests_helper # pylint: disable=F0401
from pylib import thermal_throttle # pylint: disable=F0401
try:
from pylib import surface_stats_collector # pylint: disable=F0401
except Exception:
surface_stats_collector = None
class AndroidPlatformBackend(platform_backend.PlatformBackend):
def __init__(self, adb, no_performance_mode):
super(AndroidPlatformBackend, self).__init__()
self._adb = adb
self._surface_stats_collector = None
self._perf_tests_setup = perf_tests_helper.PerfControl(self._adb)
self._thermal_throttle = thermal_throttle.ThermalThrottle(self._adb)
self._no_performance_mode = no_performance_mode
self._raw_display_frame_rate_measurements = []
if self._no_performance_mode:
logging.warning('CPU governor will not be set!')
def IsRawDisplayFrameRateSupported(self):
return True
def StartRawDisplayFrameRateMeasurement(self):
assert not self._surface_stats_collector
# Clear any leftover data from previous timed out tests
self._raw_display_frame_rate_measurements = []
self._surface_stats_collector = \
surface_stats_collector.SurfaceStatsCollector(self._adb)
self._surface_stats_collector.Start()
def StopRawDisplayFrameRateMeasurement(self):
self._surface_stats_collector.Stop()
for r in self._surface_stats_collector.GetResults():
self._raw_display_frame_rate_measurements.append(
platform.Platform.RawDisplayFrameRateMeasurement(
r.name, r.value, r.unit))
self._surface_stats_collector = None
def GetRawDisplayFrameRateMeasurements(self):
ret = self._raw_display_frame_rate_measurements
self._raw_display_frame_rate_measurements = []
return ret
def SetFullPerformanceModeEnabled(self, enabled):
if self._no_performance_mode:
return
if enabled:
self._perf_tests_setup.SetHighPerfMode()
else:
self._perf_tests_setup.SetDefaultPerfMode()
def CanMonitorThermalThrottling(self):
return True
def IsThermallyThrottled(self):
return self._thermal_throttle.IsThrottled()
def HasBeenThermallyThrottled(self):
return self._thermal_throttle.HasBeenThrottled()
def GetSystemCommitCharge(self):
for line in self._adb.RunShellCommand('dumpsys meminfo', log_result=False):
if line.startswith('Total PSS: '):
return int(line.split()[2]) * 1024
return 0
def GetMemoryStats(self, pid):
memory_usage = self._adb.GetMemoryUsageForPid(pid)[0]
return {'ProportionalSetSize': memory_usage['Pss'] * 1024,
'PrivateDirty': memory_usage['Private_Dirty'] * 1024}
def GetIOStats(self, pid):
return {}
def GetChildPids(self, pid):
    """Return pids of processes whose command name extends that of |pid|.

    A "child" here is any ps entry whose name starts with the target
    process's name but is not identical to it (e.g. 'com.app:renderer'
    for 'com.app').  Returns an empty list if |pid| is not found.
    """
    children = []
    # Skip the ps header row.
    process_rows = self._adb.RunShellCommand('ps', log_result=False)[1:]
    for row in process_rows:
        fields = row.split()
        if int(fields[1]) != pid:
            continue
        parent_name = fields[-1]
        for candidate in process_rows:
            candidate_fields = candidate.split()
            candidate_name = candidate_fields[-1]
            if (candidate_name.startswith(parent_name)
                and candidate_name != parent_name):
                children.append(int(candidate_fields[1]))
        break
    return children
def GetCommandLine(self, pid):
    """Return the command name of process |pid| from ps output.

    Raises Exception if no ps entry matches |pid|.
    """
    # Drop the ps header row before scanning.
    for row in self._adb.RunShellCommand('ps', log_result=False)[1:]:
        fields = row.split()
        if int(fields[1]) == pid:
            return fields[-1]
    raise Exception("Could not get command line for %d" % pid)
def GetOSName(self):
    """Identify this platform backend's OS as 'android'."""
    return 'android'
def CanFlushIndividualFilesFromSystemCache(self):
    """Per-file cache flushing is not supported on this backend."""
    return False
def FlushEntireSystemCache(self):
    """Drop the device's RAM caches via the perf-tests cache controller."""
    controller = perf_tests_helper.CacheControl(self._adb.Adb())
    controller.DropRamCaches()
def FlushSystemCacheForDirectory(self, directory, ignoring=None):
    """Directory-scoped cache flushing is not implemented on this backend."""
    raise NotImplementedError()
|
SomethingExplosive/android_external_chromium_org
|
tools/telemetry/telemetry/core/platform/android_platform_backend.py
|
Python
|
bsd-3-clause
| 4,335
|
from __future__ import unicode_literals
def account_delete_mark(deletion):
    """Soft-delete: deactivate the user behind *deletion* and persist it."""
    user = deletion.user
    user.is_active = False
    user.save()
def account_delete_expunge(deletion):
    """Hard-delete: permanently remove the user behind *deletion*."""
    user = deletion.user
    user.delete()
|
Amechi101/concepteur-market-app
|
venv/lib/python2.7/site-packages/account/callbacks.py
|
Python
|
mit
| 205
|
from functools import wraps
from django.core.exceptions import ObjectDoesNotExist # NOQA
from django.db.models import signals # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError,
)
from django.db.models.expressions import ( # NOQA
Case, Expression, ExpressionWrapper, F, Func, Value, When,
)
from django.db.models.fields import * # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.fields.subclassing import SubfieldBase # NOQA
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.query import Prefetch, Q, QuerySet # NOQA
# Imports that would create circular imports if sorted
from django.db.models.base import Model # NOQA isort:skip
from django.db.models.fields.related import ( # NOQA isort:skip
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel,
)
def permalink(func):
    """Decorator that resolves the decorated function's result via reverse().

    The wrapped function must return a tuple of the form
    ``(viewname, viewargs)`` or ``(viewname, viewargs, viewkwargs)``;
    the decorator turns that into a URL with urlresolvers.reverse().
    """
    # Imported lazily, at decoration time, to avoid circular imports.
    from django.core.urlresolvers import reverse

    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        viewname = result[0]
        # Positional slice covers viewargs and, if present, viewkwargs.
        return reverse(viewname, None, *result[1:3])
    return wrapper
|
DanteOnline/free-art
|
venv/lib/python3.4/site-packages/django/db/models/__init__.py
|
Python
|
gpl-3.0
| 1,679
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that downgrades wrapper-generation failures to warnings."""

    def handle_error(self, wrapper, exception, traceback_):
        # Report the failure, then tell pybindgen it was handled so code
        # generation continues with the remaining wrappers.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
# Route pybindgen errors through the warning-emitting handler above so a
# single failing wrapper does not abort code generation.
pybindgen.settings.error_handler = ErrorHandler()
# NOTE(review): sys is not referenced in this chunk — presumably used later
# in the generated file; confirm before removing.
import sys
def module_init():
    """Create the root pybindgen module for 'ns.tools' in the ::ns3 namespace."""
    return Module('ns.tools', cpp_namespace='::ns3')
def register_types(module):
    """Register every C++ type wrapped by the ns.tools bindings.

    Machine-generated by pybindgen's type-registration pass: each
    add_class/add_enum call mirrors one declaration from the ns-3 header
    named in the '##' comment above it.  Types owned by other ns-3
    modules are pulled in with import_from_module so that parents and
    outer classes resolve across modules.  Registration order matters:
    a class must exist in root_module before it is referenced as a
    parent= or outer_class= of a later registration.
    """
    root_module = module.get_root()
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback [class]
    module.add_class('DataOutputCallback', allow_subclassing=True, import_from_module='ns.stats')
    ## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation [class]
    module.add_class('DelayJitterEstimation')
    ## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector [class]
    module.add_class('EventGarbageCollector')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## gnuplot.h (module 'tools'): ns3::Gnuplot [class]
    module.add_class('Gnuplot')
    ## gnuplot.h (module 'tools'): ns3::GnuplotCollection [class]
    module.add_class('GnuplotCollection')
    ## gnuplot.h (module 'tools'): ns3::GnuplotDataset [class]
    module.add_class('GnuplotDataset')
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary [class]
    module.add_class('StatisticalSummary', allow_subclassing=True, import_from_module='ns.stats')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset [class]
    module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Style [enumeration]
    module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
    module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction [class]
    module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset [class]
    module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
    ## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction [class]
    module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## data-calculator.h (module 'stats'): ns3::DataCalculator [class]
    module.add_class('DataCalculator', import_from_module='ns.stats', parent=root_module['ns3::Object'])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputInterface [class]
    module.add_class('DataOutputInterface', import_from_module='ns.stats', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## basic-data-calculators.h (module 'stats'): ns3::MinMaxAvgTotalCalculator<double> [class]
    module.add_class('MinMaxAvgTotalCalculator', import_from_module='ns.stats', template_parameters=['double'], parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']])
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (none currently).

    The root-module lookup mirrors the other register_types_* helpers,
    keeping the generated structure uniform even though no types are
    added here yet.
    """
    module.get_root()
def register_methods(root_module):
    """Register the member functions of every type known to root_module.

    Machine-generated dispatch list: one register_Ns3*_methods call per
    type registered in register_types(), in the same dependency order
    (base classes before the classes that reference them).
    """
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataOutputCallback_methods(root_module, root_module['ns3::DataOutputCallback'])
    register_Ns3DelayJitterEstimation_methods(root_module, root_module['ns3::DelayJitterEstimation'])
    register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3StatisticalSummary_methods(root_module, root_module['ns3::StatisticalSummary'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataCalculator_methods(root_module, root_module['ns3::DataCalculator'])
    register_Ns3DataOutputInterface_methods(root_module, root_module['ns3::DataOutputInterface'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, root_module['ns3::MinMaxAvgTotalCalculator< double >'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeConstructionList.

    Machine-generated; each '##' comment gives the exact C++ declaration
    being wrapped.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and public fields of ns3::AttributeConstructionList::Item.

    Machine-generated; each '##' comment gives the exact C++ declaration
    being wrapped.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register ns3::Buffer (buffer.h, module 'network')."""
    # constructors: default, sized, sized with initialize flag, copy
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'dataSize')])
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    # bool AddAtEnd(uint32_t end)
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    # void AddAtEnd(ns3::Buffer const & o)
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    # bool AddAtStart(uint32_t start)
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    # ns3::Buffer::Iterator Begin() const
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    # void CopyData(std::ostream *, uint32_t) const
    cls.add_method('CopyData', 'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    # uint32_t CopyData(uint8_t *, uint32_t) const
    cls.add_method('CopyData', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    # ns3::Buffer CreateFragment(uint32_t start, uint32_t length) const
    cls.add_method('CreateFragment', 'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    # ns3::Buffer CreateFullCopy() const
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    # uint32_t Deserialize(uint8_t const *, uint32_t)
    cls.add_method('Deserialize', 'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # ns3::Buffer::Iterator End() const
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    # no-argument const getters, registered in the original order
    for getter, ret in (('GetCurrentEndOffset', 'int32_t'),
                        ('GetCurrentStartOffset', 'int32_t'),
                        ('GetSerializedSize', 'uint32_t'),
                        ('GetSize', 'uint32_t'),
                        ('PeekData', 'uint8_t const *')):
        cls.add_method(getter, ret, [], is_const=True)
    # void RemoveAtEnd(uint32_t) / void RemoveAtStart(uint32_t)
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    # uint32_t Serialize(uint8_t *, uint32_t) const
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register ns3::Buffer::Iterator (buffer.h, module 'network')."""
    # constructors: copy, then default
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    cls.add_constructor([])
    # uint16_t CalculateIpChecksum(uint16_t [, uint32_t initialChecksum])
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    cls.add_method('CalculateIpChecksum', 'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    # uint32_t GetDistanceFrom(Iterator const &) const
    cls.add_method('GetDistanceFrom', 'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    # no-argument const queries
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    cls.add_method('IsStart', 'bool', [], is_const=True)
    # cursor movement: Next/Prev, each with a no-arg and a delta overload
    for mover in ('Next', 'Prev'):
        cls.add_method(mover, 'void', [])
        cls.add_method(mover, 'void', [param('uint32_t', 'delta')])
    # void Read(uint8_t * buffer, uint32_t size)
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # no-argument integer readers, registered in the original order
    for reader, ret in (('ReadLsbtohU16', 'uint16_t'),
                        ('ReadLsbtohU32', 'uint32_t'),
                        ('ReadLsbtohU64', 'uint64_t'),
                        ('ReadNtohU16', 'uint16_t'),
                        ('ReadNtohU32', 'uint32_t'),
                        ('ReadNtohU64', 'uint64_t'),
                        ('ReadU16', 'uint16_t'),
                        ('ReadU32', 'uint32_t'),
                        ('ReadU64', 'uint64_t'),
                        ('ReadU8', 'uint8_t')):
        cls.add_method(reader, ret, [])
    # bulk writes: raw buffer, then an iterator range
    cls.add_method('Write', 'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Write', 'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    # single-value integer writers, registered in the original order
    for writer, arg_type in (('WriteHtolsbU16', 'uint16_t'),
                             ('WriteHtolsbU32', 'uint32_t'),
                             ('WriteHtolsbU64', 'uint64_t'),
                             ('WriteHtonU16', 'uint16_t'),
                             ('WriteHtonU32', 'uint32_t'),
                             ('WriteHtonU64', 'uint64_t'),
                             ('WriteU16', 'uint16_t'),
                             ('WriteU32', 'uint32_t'),
                             ('WriteU64', 'uint64_t'),
                             ('WriteU8', 'uint8_t')):
        cls.add_method(writer, 'void', [param(arg_type, 'data')])
    # void WriteU8(uint8_t data, uint32_t len)
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register ns3::ByteTagIterator (packet.h, module 'network')."""
    # copy constructor
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::ByteTagIterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register ns3::ByteTagIterator::Item (packet.h, module 'network')."""
    # copy constructor
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # uint32_t GetEnd() const / uint32_t GetStart() const
    for accessor in ('GetEnd', 'GetStart'):
        cls.add_method(accessor, 'uint32_t', [], is_const=True)
    # void GetTag(ns3::Tag & tag) const
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register ns3::ByteTagList (byte-tag-list.h, module 'network')."""
    # constructors: default, then copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    # ns3::TagBuffer Add(TypeId, uint32_t, int32_t, int32_t)
    cls.add_method('Add', 'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    # void Add(ns3::ByteTagList const &)
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    # void AddAtEnd(int32_t, int32_t) / void AddAtStart(int32_t, int32_t)
    cls.add_method('AddAtEnd', 'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    cls.add_method('AddAtStart', 'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    # ns3::ByteTagList::Iterator Begin(int32_t, int32_t) const
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    # void RemoveAll()
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register ns3::ByteTagList::Iterator (byte-tag-list.h, module 'network')."""
    # copy constructor
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    # uint32_t GetOffsetStart() const
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::ByteTagList::Iterator::Item Next()
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register ns3::ByteTagList::Iterator::Item (byte-tag-list.h, module 'network')."""
    # constructors: copy, then from a TagBuffer
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # public data members, exposed as read-write instance attributes
    for member, member_type in (('buf', 'ns3::TagBuffer'),
                                ('end', 'int32_t'),
                                ('size', 'uint32_t'),
                                ('start', 'int32_t'),
                                ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(member, member_type, is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register ns3::CallbackBase (callback.h, module 'core')."""
    # public constructors: copy, then default
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Ptr<ns3::CallbackImplBase> GetImpl() const
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # protected constructor taking the implementation pointer
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    # protected static helper: std::string Demangle(std::string const & mangled)
    cls.add_method('Demangle', 'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3DataOutputCallback_methods(root_module, cls):
    """Register ns3::DataOutputCallback (data-output-interface.h, module 'stats')."""
    # constructors: default, then copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
    # pure-virtual OutputSingleton(key, variable, val) overloads,
    # one per value type, in the original registration order
    for val_type in ('int', 'uint32_t', 'double', 'std::string', 'ns3::Time'):
        cls.add_method('OutputSingleton', 'void',
                       [param('std::string', 'key'), param('std::string', 'variable'), param(val_type, 'val')],
                       is_pure_virtual=True, is_virtual=True)
    # pure-virtual OutputStatistic(key, variable, statSum)
    cls.add_method('OutputStatistic', 'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3DelayJitterEstimation_methods(root_module, cls):
    """Register ns3::DelayJitterEstimation (delay-jitter-estimation.h, module 'tools')."""
    # constructors: copy, then default
    cls.add_constructor([param('ns3::DelayJitterEstimation const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Time GetLastDelay() const
    cls.add_method('GetLastDelay', 'ns3::Time', [], is_const=True)
    # uint64_t GetLastJitter() const
    cls.add_method('GetLastJitter', 'uint64_t', [], is_const=True)
    # static void PrepareTx(Ptr<const Packet>)
    cls.add_method('PrepareTx', 'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')], is_static=True)
    # void RecordRx(Ptr<const Packet>)
    cls.add_method('RecordRx', 'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
    """Register ns3::EventGarbageCollector (event-garbage-collector.h, module 'tools')."""
    # constructors: copy, then default
    cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
    cls.add_constructor([])
    # void Track(ns3::EventId event)
    cls.add_method('Track', 'void', [param('ns3::EventId', 'event')])
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register ns3::EventId (event-id.h, module 'core')."""
    # equality / inequality comparison operators
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # constructors: copy, default, and fully-specified (impl, ts, context, uid)
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    # void Cancel()
    cls.add_method('Cancel', 'void', [])
    # no-argument const accessors, registered in the original order
    for accessor, ret in (('GetContext', 'uint32_t'),
                          ('GetTs', 'uint64_t'),
                          ('GetUid', 'uint32_t'),
                          ('IsExpired', 'bool'),
                          ('IsRunning', 'bool'),
                          ('PeekEventImpl', 'ns3::EventImpl *')):
        cls.add_method(accessor, ret, [], is_const=True)
    return
def register_Ns3Gnuplot_methods(root_module, cls):
    """Register ns3::Gnuplot (gnuplot.h, module 'tools')."""
    # constructors: copy, then (outputFilename, title) with empty-string defaults
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
    # void AddDataset(ns3::GnuplotDataset const &)
    cls.add_method('AddDataset', 'void', [param('ns3::GnuplotDataset const &', 'dataset')])
    # void AppendExtra(std::string const &)
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    # static std::string DetectTerminal(std::string const & filename)
    cls.add_method('DetectTerminal', 'std::string',
                   [param('std::string const &', 'filename')], is_static=True)
    # void GenerateOutput(std::ostream &) const
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    # setters: extra commands, axis legends, terminal, title
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetLegend', 'void',
                   [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    return
def register_Ns3GnuplotCollection_methods(root_module, cls):
    """Register ns3::GnuplotCollection (gnuplot.h, module 'tools')."""
    # constructors: copy, then from an output file name
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    # void AddPlot(ns3::Gnuplot const &)
    cls.add_method('AddPlot', 'void', [param('ns3::Gnuplot const &', 'plot')])
    # void GenerateOutput(std::ostream &) const
    cls.add_method('GenerateOutput', 'void', [param('std::ostream &', 'os')], is_const=True)
    # ns3::Gnuplot & GetPlot(unsigned int id)
    cls.add_method('GetPlot', 'ns3::Gnuplot &', [param('unsigned int', 'id')])
    # void SetTerminal(std::string const &)
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    return
def register_Ns3GnuplotDataset_methods(root_module, cls):
    """Register ns3::GnuplotDataset (gnuplot.h, module 'tools')."""
    # public copy constructor
    cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
    # static void SetDefaultExtra(std::string const &)
    cls.add_method('SetDefaultExtra', 'void',
                   [param('std::string const &', 'extra')], is_static=True)
    # void SetExtra(std::string const &)
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    # void SetTitle(std::string const &)
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    # protected constructor from an implementation Data pointer
    cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')],
                        visibility='protected')
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register ns3::ObjectBase (object-base.h, module 'core')."""
    # constructors: default, then copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # attribute read access: plain and fail-safe variants
    cls.add_method('GetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True)
    # ns3::TypeId GetInstanceTypeId() const = 0
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # attribute write access: plain and fail-safe variants
    cls.add_method('SetAttribute', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # trace source connection / disconnection, with and without a context string
    cls.add_method('TraceConnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # protected construction hooks
    cls.add_method('ConstructSelf', 'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register ns3::ObjectDeleter (object.h, module 'core')."""
    # constructors: default, then copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object *)
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register ns3::ObjectFactory (object-factory.h, module 'core')."""
    # operator<< support for stream output
    cls.add_output_stream_operator()
    # constructors: copy, default, and from a type-id string
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    # ns3::Ptr<ns3::Object> Create() const
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    # void Set(std::string name, ns3::AttributeValue const & value)
    cls.add_method('Set', 'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId object, C string, std::string
    for tid_type in ('ns3::TypeId', 'char const *', 'std::string'):
        cls.add_method('SetTypeId', 'void', [param(tid_type, 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata (packet-metadata.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 
                   'void', 
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 
                   'void', 
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 
                   'void', 
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 
                   'void', 
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 
                   'ns3::PacketMetadata::ItemIterator', 
                   [param('ns3::Buffer', 'buffer')], 
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 
                   'ns3::PacketMetadata', 
                   [param('uint32_t', 'start'), param('uint32_t', 'end')], 
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 
                   'void', 
                   [], 
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 
                   'void', 
                   [], 
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint64_t', 
                   [], 
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 
                   'void', 
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 
                   'void', 
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 
                   'void', 
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 
                   'void', 
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 
                   'uint32_t', 
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], 
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register bindings for the nested struct ns3::PacketMetadata::Item (packet-metadata.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register bindings for the nested class ns3::PacketMetadata::ItemIterator (packet-metadata.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 
                   'bool', 
                   [], 
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 
                   'ns3::PacketMetadata::Item', 
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator (packet.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 
                   'bool', 
                   [], 
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 
                   'ns3::PacketTagIterator::Item', 
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register bindings for the nested class ns3::PacketTagIterator::Item (packet.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 
                   'void', 
                   [param('ns3::Tag &', 'tag')], 
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList (packet-tag-list.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 
                   'void', 
                   [param('ns3::Tag const &', 'tag')], 
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 
                   'ns3::PacketTagList::TagData const *', 
                   [], 
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 
                   'bool', 
                   [param('ns3::Tag &', 'tag')], 
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 
                   'bool', 
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 
                   'void', 
                   [])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register bindings for the nested struct ns3::PacketTagList::TagData (packet-tag-list.h, module 'network') on the pybindgen wrapper `cls`."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for the template instantiation ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> (simple-ref-count.h, module 'core') on the pybindgen wrapper `cls`."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 
                   'void', 
                   [], 
                   is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register bindings for ns3::Simulator (simulator.h, module 'core') on the pybindgen wrapper `cls`; most members are static."""
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel', 
                   'void', 
                   [param('ns3::EventId const &', 'id')], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy', 
                   'void', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft', 
                   'ns3::Time', 
                   [param('ns3::EventId const &', 'id')], 
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation', 
                   'ns3::Ptr< ns3::SimulatorImpl >', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime', 
                   'ns3::Time', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired', 
                   'bool', 
                   [param('ns3::EventId const &', 'id')], 
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished', 
                   'bool', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now', 
                   'ns3::Time', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove', 
                   'void', 
                   [param('ns3::EventId const &', 'id')], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation', 
                   'void', 
                   [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler', 
                   'void', 
                   [param('ns3::ObjectFactory', 'schedulerFactory')], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop', 
                   'void', 
                   [], 
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
    cls.add_method('Stop', 
                   'void', 
                   [param('ns3::Time const &', 'time')], 
                   is_static=True)
    return
def register_Ns3StatisticalSummary_methods(root_module, cls):
    """Register bindings for the abstract interface ns3::StatisticalSummary (data-calculator.h, module 'stats') on the pybindgen wrapper `cls`; all accessors are pure virtual."""
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary() [constructor]
    cls.add_constructor([])
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary(ns3::StatisticalSummary const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::StatisticalSummary const &', 'arg0')])
    ## data-calculator.h (module 'stats'): long int ns3::StatisticalSummary::getCount() const [member function]
    cls.add_method('getCount', 
                   'long int', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMax() const [member function]
    cls.add_method('getMax', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMean() const [member function]
    cls.add_method('getMean', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMin() const [member function]
    cls.add_method('getMin', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSqrSum() const [member function]
    cls.add_method('getSqrSum', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getStddev() const [member function]
    cls.add_method('getStddev', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSum() const [member function]
    cls.add_method('getSum', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getVariance() const [member function]
    cls.add_method('getVariance', 
                   'double', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register bindings for the abstract base class ns3::Tag (tag.h, module 'network') on the pybindgen wrapper `cls`."""
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register bindings for ns3::TagBuffer (tag-buffer.h, module 'network') on the pybindgen wrapper `cls`."""
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 
                   'void', 
                   [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 
                   'void', 
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 
                   'double', 
                   [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 
                   'uint16_t', 
                   [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 
                   'uint32_t', 
                   [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 
                   'uint64_t', 
                   [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 
                   'uint8_t', 
                   [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 
                   'void', 
                   [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 
                   'void', 
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 
                   'void', 
                   [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 
                   'void', 
                   [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 
                   'void', 
                   [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 
                   'void', 
                   [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 
                   'void', 
                   [param('uint8_t', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register bindings for ns3::TypeId (type-id.h, module 'core') on the pybindgen wrapper `cls`, including comparison and output-stream operators."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 
                   'ns3::TypeId::AttributeInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 
                   'ns3::TypeId::TraceSourceInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 
                   'bool', 
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register bindings for the nested struct ns3::TypeId::AttributeInformation (type-id.h, module 'core') on the pybindgen wrapper `cls`."""
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register bindings for the nested struct ns3::TypeId::TraceSourceInformation (type-id.h, module 'core') on the pybindgen wrapper `cls`."""
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register bindings for ns3::empty (empty.h, module 'core') on the pybindgen wrapper `cls`; only default and copy constructors exist."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register operators, constructors and member functions of ns3::int64x64_t.

    Registration order matches the generated original exactly, since pybindgen
    emits wrapper code in call order.
    """
    i64 = root_module['ns3::int64x64_t']
    # Plain C++ numeric operand types, in the original scan order.
    scalar_types = ('long long unsigned int', 'long unsigned int', 'unsigned int',
                    'short unsigned int', 'unsigned char', 'long long int', 'long int',
                    'int', 'short int', 'signed char', 'double')
    for op in ('*', '+', '-', '/'):
        for cpp_type in scalar_types:
            cls.add_binary_numeric_operator(op, i64, i64, param(cpp_type + ' const', 'right'))
        if op == '-':
            # Unary negation was registered between the scalar overloads and
            # the int64x64_t overload of binary '-'.
            cls.add_unary_numeric_operator('-')
        cls.add_binary_numeric_operator(op, i64, i64, param('ns3::int64x64_t const &', 'right'))
    for cmp_op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(cmp_op)
    for inplace_op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(inplace_op, param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    for cmp_op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(cmp_op)
    # Constructors: default, one per plain numeric type, (hi, lo) pair, copy.
    cls.add_constructor([])
    for cpp_type in ('double', 'int', 'long int', 'long long int', 'unsigned int',
                     'long unsigned int', 'long long unsigned int'):
        cls.add_constructor([param(cpp_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Member functions.
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register bindings for the abstract ns3::Chunk base class."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    # Pure virtual serialization/printing interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Gnuplot2dDataset_methods(root_module, cls):
    """Register bindings for ns3::Gnuplot2dDataset (a 2-D gnuplot data series)."""
    cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    # Three Add() overloads: plain point, symmetric error, per-axis errors.
    cls.add_method('Add', 'void', [param('double', 'x'), param('double', 'y')])
    cls.add_method('Add', 'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    cls.add_method('Add', 'void',
                   [param('double', 'x'), param('double', 'y'),
                    param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
    cls.add_method('AddEmptyLine', 'void', [])
    # Class-wide defaults (static) and per-instance setters.
    cls.add_method('SetDefaultErrorBars', 'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')], is_static=True)
    cls.add_method('SetDefaultStyle', 'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')], is_static=True)
    cls.add_method('SetErrorBars', 'void', [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    cls.add_method('SetStyle', 'void', [param('ns3::Gnuplot2dDataset::Style', 'style')])
    return
def register_Ns3Gnuplot2dFunction_methods(root_module, cls):
    """Register bindings for ns3::Gnuplot2dFunction (a 2-D gnuplot function plot)."""
    cls.add_constructor([param('ns3::Gnuplot2dFunction const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'),
                         param('std::string const &', 'function', default_value='""')])
    cls.add_method('SetFunction', 'void', [param('std::string const &', 'function')])
    return
def register_Ns3Gnuplot3dDataset_methods(root_module, cls):
    """Register bindings for ns3::Gnuplot3dDataset (a 3-D gnuplot data series)."""
    cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    cls.add_method('Add', 'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'z')])
    cls.add_method('AddEmptyLine', 'void', [])
    # Class-wide default (static) and per-instance style setter.
    cls.add_method('SetDefaultStyle', 'void', [param('std::string const &', 'style')],
                   is_static=True)
    cls.add_method('SetStyle', 'void', [param('std::string const &', 'style')])
    return
def register_Ns3Gnuplot3dFunction_methods(root_module, cls):
    """Register bindings for ns3::Gnuplot3dFunction (a 3-D gnuplot function plot)."""
    cls.add_constructor([param('ns3::Gnuplot3dFunction const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'),
                         param('std::string const &', 'function', default_value='""')])
    cls.add_method('SetFunction', 'void', [param('std::string const &', 'function')])
    return
def register_Ns3Header_methods(root_module, cls):
    """Register bindings for the abstract ns3::Header base class."""
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    # Pure virtual serialization interface implemented by concrete headers.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Object_methods(root_module, cls):
    """Register bindings for ns3::Object, the aggregation-aware base class."""
    cls.add_constructor([])
    # Public interface.
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Start', 'void', [])
    # Copy constructor and the Do*/Notify* hooks are protected in the C++ class.
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register bindings for ns3::Object::AggregateIterator."""
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])  # copy
    cls.add_constructor([])  # default
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeAccessor> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeChecker> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::AttributeValue> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::CallbackImplBase> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::EventImpl> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::NixVector> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::Packet> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<ns3::TraceSourceAccessor> specialization."""
    template_name = 'ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(template_name + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register operators, constructors and member functions of ns3::Time.

    Registration order matches the generated original exactly, since pybindgen
    emits wrapper code in call order.
    """
    time_wrapper = root_module['ns3::Time']
    # Arithmetic and comparison operators.
    cls.add_binary_numeric_operator('+', time_wrapper, time_wrapper,
                                    param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', time_wrapper, time_wrapper,
                                    param('ns3::Time const &', 'right'))
    for cmp_op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(cmp_op)
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    for cmp_op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(cmp_op)
    # Constructors: default, copy, one per plain numeric type, string, int64x64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    for cpp_type in ('double', 'int', 'long int', 'long long int', 'unsigned int',
                     'long unsigned int', 'long long unsigned int'):
        cls.add_constructor([param(cpp_type, 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    # Member functions.
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    cls.add_method('From', 'ns3::Time',
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    cls.add_method('FromDouble', 'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    cls.add_method('FromInteger', 'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    cls.add_method('GetDouble', 'double', [], is_const=True)
    # int64_t-returning unit getters, in the original scan order.
    for getter in ('GetFemtoSeconds', 'GetInteger', 'GetMicroSeconds',
                   'GetMilliSeconds', 'GetNanoSeconds', 'GetPicoSeconds'):
        cls.add_method(getter, 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    # Boolean sign predicates, in the original scan order.
    for predicate in ('IsNegative', 'IsPositive', 'IsStrictlyNegative',
                      'IsStrictlyPositive', 'IsZero'):
        cls.add_method(predicate, 'bool', [], is_const=True)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register bindings for ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor (generator output order kept).
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # The four (dis)connect methods are all pure-virtual const members returning bool;
    # the context-taking variants insert an extra std::string parameter.
    for method_name, takes_context in (('Connect', True),
                                       ('ConnectWithoutContext', False),
                                       ('Disconnect', True),
                                       ('DisconnectWithoutContext', False)):
        args = [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)]
        if takes_context:
            args.append(param('std::string', 'context'))
        args.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(method_name, 'bool', args,
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register bindings for the abstract ns3::Trailer base class (trailer.h, module 'network') on `cls`."""
    cls.add_output_stream_operator()
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # Deserialize is pure virtual and non-const (it mutates the trailer being filled in).
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Static TypeId accessor.
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::AttributeAccessor (attribute.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Get reads the attribute out of a const object into `attribute`.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Capability queries: both take no arguments and return bool.
    for query in ('HasGetter', 'HasSetter'):
        cls.add_method(query, 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Set writes `value` into a mutable object (ownership of the pointer is not transferred).
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::AttributeChecker (attribute.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # CreateValidValue is the only concrete (non-virtual) member here.
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')], is_const=True)
    # Type-introspection queries: two string-returning, one bool-returning, all pure virtual.
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register bindings for the abstract ns3::AttributeValue base class (attribute.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # DeserializeFromString mutates the value, so it is not const.
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (callback.h, module 'core') on `cls`."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (callback.h, module 'core') on `cls`."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # IsEqual compares against another implementation held by smart pointer.
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (callback.h, module 'core') on `cls`."""
    # Constructors: copy, default, then the CallbackBase-wrapping constructor.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Virtual overrides of the AttributeValue interface (concrete here, not pure).
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # Set takes the callback by value.
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DataCalculator_methods(root_module, cls):
    """Register bindings for ns3::DataCalculator (data-calculator.h, module 'stats') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::DataCalculator const &', 'arg0')])
    cls.add_constructor([])
    # Enable/disable toggles take no arguments and return nothing.
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    # Simple const getters.
    cls.add_method('GetContext', 'std::string', [], is_const=True)
    cls.add_method('GetEnabled', 'bool', [], is_const=True)
    cls.add_method('GetKey', 'std::string', [], is_const=True)
    # Output is the pure-virtual hook subclasses implement.
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Setters take their strings as 'std::string const' by value.
    cls.add_method('SetContext', 'void', [param('std::string const', 'context')])
    cls.add_method('SetKey', 'void', [param('std::string const', 'key')])
    # Start/Stop are virtual and take a Time reference.
    cls.add_method('Start', 'void', [param('ns3::Time const &', 'startTime')], is_virtual=True)
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'stopTime')], is_virtual=True)
    # DoDispose is a protected virtual override.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3DataOutputInterface_methods(root_module, cls):
    """Register bindings for ns3::DataOutputInterface (data-output-interface.h, module 'stats') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::DataOutputInterface const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetFilePrefix', 'std::string', [], is_const=True)
    # Output is pure virtual and non-const: it drains the DataCollector.
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetFilePrefix', 'void', [param('std::string const', 'prefix')])
    # Protected virtual cleanup hook.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (attribute.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # All three AttributeValue overrides are declared private in this class.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register bindings for ns3::EventImpl (event-impl.h, module 'core') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    # NOTE: IsCancelled is registered without is_const, matching the C++ declaration.
    cls.add_method('IsCancelled', 'bool', [])
    # Notify is the protected pure-virtual hook that subclasses implement.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, cls):
    """Register bindings for ns3::MinMaxAvgTotalCalculator<double> (basic-data-calculators.h, module 'stats') on `cls`."""
    # Copy constructor first, then the default constructor.
    cls.add_constructor([param('ns3::MinMaxAvgTotalCalculator< double > const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')],
                   is_const=True, is_virtual=True)
    cls.add_method('Reset', 'void', [])
    cls.add_method('Update', 'void', [param('double const', 'i')])
    # getCount returns 'long int'; the remaining statistic getters all return double.
    cls.add_method('getCount', 'long int', [], is_const=True, is_virtual=True)
    for getter in ('getMax', 'getMean', 'getMin', 'getSqrSum',
                   'getStddev', 'getSum', 'getVariance'):
        cls.add_method(getter, 'double', [], is_const=True, is_virtual=True)
    # Protected virtual cleanup hook.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register bindings for ns3::NixVector (nix-vector.h, module 'network') on `cls`."""
    cls.add_output_stream_operator()
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    cls.add_method('AddNeighborIndex', 'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    # Deserialize reads from a raw word buffer and is therefore non-const.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register bindings for ns3::ObjectFactoryChecker (object-factory.h, module 'core') on `cls`."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register bindings for ns3::ObjectFactoryValue (object-factory.h, module 'core') on `cls`."""
    # Constructors: default, copy, then the ObjectFactory-wrapping constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # Virtual overrides of the AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Get returns the wrapped factory by value.
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register bindings for ns3::Packet (packet.h, module 'network') on `cls`.

    All calls below reproduce the generated registrations in their original
    order; only the formatting and comments differ.
    """
    cls.add_output_stream_operator()
    # Constructors: default, copy, sized, (buffer, size, magic), (buffer, size).
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    cls.add_constructor([param('uint32_t', 'size')])
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Mutators that add data / headers / trailers / tags.
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    # Inspection and copying.
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    # Two CopyData overloads: into a raw byte buffer, and into an ostream.
    cls.add_method('CopyData', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CopyData', 'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    # Static feature toggles.
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    # Tag queries and accessors.
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    # PeekData is registered as deprecated in the upstream API.
    cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True)
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    # PeekTrailer is non-const, matching the C++ declaration.
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    # Printing helpers.
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    # Removal operations.
    cls.add_method('RemoveAllByteTags', 'void', [])
    cls.add_method('RemoveAllPacketTags', 'void', [])
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    # Serialization into a caller-provided buffer.
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register bindings for ns3::TimeChecker (nstime.h, module 'core') on `cls`."""
    # Only the default and copy constructors are exposed.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Bind ns3::TimeValue (nstime.h, module 'core').

    TimeValue is the AttributeValue wrapper around ns3::Time; the bound
    methods mirror the AttributeValue virtual interface plus Get/Set.
    """
    # Constructors: default, copy, and conversion from ns3::Time.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Time Get() const
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Time const & value)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Bind ns3::TypeIdChecker (type-id.h, module 'core').

    Only the default constructor and the copy constructor are exposed.
    """
    # ns3::TypeIdChecker::TypeIdChecker() [constructor]
    # ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    for ctor_args in ([], [param('ns3::TypeIdChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Bind ns3::TypeIdValue (type-id.h, module 'core').

    TypeIdValue is the AttributeValue wrapper around ns3::TypeId; the bound
    methods mirror the AttributeValue virtual interface plus Get/Set.
    """
    # Constructors: default, copy, and conversion from ns3::TypeId.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::TypeId Get() const
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::TypeId const & value)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_functions(root_module):
    """Register free functions for each submodule of *root_module*."""
    # Only the ns3::FatalImpl submodule has a registration hook here.
    register_functions_ns3_FatalImpl(
        root_module.get_submodule('FatalImpl'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """Nothing to register: ns3::FatalImpl exposes no free functions."""
    return None
def main():
    """Emit the generated C++ binding code for this module to stdout.

    Builds the pybindgen module description (types, methods, free
    functions) and then generates the corresponding C++ source.
    """
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
|
JBonsink/GSOC-2013
|
tools/ns-allinone-3.14.1/ns-3.14.1/src/tools/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-3.0
| 187,684
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Cascade Floating IP Floating Port deletion
Revision ID: f15b1fb526dd
Revises: 57dd745253a6
Create Date: 2014-08-24 21:56:36.422885
"""
# revision identifiers, used by Alembic.
revision = 'f15b1fb526dd'
down_revision = '57dd745253a6'
from alembic import op
from sqlalchemy.engine import reflection
def _drop_constraint():
    """Drop the existing (non-cascading) FK on floatingips.floating_port_id.

    The constraint name is backend-generated, so it is discovered via
    schema reflection rather than hard-coded.

    Raises:
        RuntimeError: if no foreign key on floating_port_id is found, so
            the failure is explicit instead of a cryptic IndexError.
    """
    inspector = reflection.Inspector.from_engine(op.get_bind())
    fk_names = [fk['name'] for fk in
                inspector.get_foreign_keys('floatingips')
                if 'floating_port_id' in fk['constrained_columns']]
    if not fk_names:
        # Previously this fell through to fk_names[0] and raised IndexError.
        raise RuntimeError(
            "No foreign key constraint on floatingips.floating_port_id; "
            "cannot drop it for the CASCADE migration")
    op.drop_constraint(fk_names[0], 'floatingips', 'foreignkey')
def upgrade():
    """Recreate the floatingips->ports FK with ON DELETE CASCADE.

    Drops the reflected existing constraint first, then re-adds it with
    cascade semantics so deleting a port removes its floating IP row.
    """
    _drop_constraint()
    # name=None lets the backend generate a conventional constraint name.
    op.create_foreign_key(
        name=None,
        source='floatingips', referent='ports',
        local_cols=['floating_port_id'], remote_cols=['id'], ondelete='CASCADE'
    )
|
paninetworks/neutron
|
neutron/db/migration/alembic_migrations/versions/f15b1fb526dd_cascade_floatingip.py
|
Python
|
apache-2.0
| 1,453
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations to select TPU core to run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
frreiss/tensorflow-fred
|
tensorflow/python/tpu/ops/tpu_ordinal_selector_op.py
|
Python
|
apache-2.0
| 843
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from branch_utility import BranchUtility, ChannelInfo
from test_data.canned_data import (CANNED_BRANCHES, CANNED_CHANNELS)
class TestBranchUtility(object):
  '''Mimics BranchUtility to return valid-ish data without needing omahaproxy
  data.
  '''
  def __init__(self, branches, channels):
    ''' Parameters: |branches| is a mapping of versions to branches, and
    |channels| is a mapping of channels to versions.
    '''
    self._branches = branches
    self._channels = channels

  @staticmethod
  def CreateWithCannedData():
    '''Returns a TestBranchUtility that uses 'canned' test data pulled from
    older branches of SVN data.
    '''
    return TestBranchUtility(CANNED_BRANCHES, CANNED_CHANNELS)

  def GetAllChannelInfo(self):
    # One ChannelInfo per channel name, in BranchUtility's canonical order.
    return [self.GetChannelInfo(channel)
            for channel in BranchUtility.GetAllChannelNames()]

  def GetChannelInfo(self, channel):
    version = self._channels[channel]
    return ChannelInfo(channel, self.GetBranchForVersion(version), version)

  def GetBranchForVersion(self, version):
    return self._branches[version]

  def GetChannelForVersion(self, version):
    # Reverse lookup in the channel->version map. Iterating the dict
    # directly replaces the Python-2-only iterkeys() call (identical
    # semantics, also valid on Python 3). Returns None (implicitly) when
    # the version is not bound to any channel, matching BranchUtility.
    for channel in self._channels:
      if self._channels[channel] == version:
        return channel
|
espadrine/opera
|
chromium/src/chrome/common/extensions/docs/server2/test_branch_utility.py
|
Python
|
bsd-3-clause
| 1,398
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestModelSaving(test.TestCase):
  """Round-trip tests for keras.models.save_model / load_model (HDF5).

  Every test returns early (silently skipping) when h5py is unavailable,
  mirroring the optional-dependency behavior of Keras model saving.
  """

  def test_sequential_model_saving(self):
    # Save/reload a compiled Sequential model (with metric and
    # sample_weight_mode) and check predictions match, including after an
    # additional identical training step on both copies.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy],
                    sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      new_model = keras.models.load_model(fname)
      os.remove(fname)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # test that new updates are the same with both models
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      new_model.train_on_batch(x, y)
      out = model.predict(x)
      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_sequential_model_saving_2(self):
    # Custom optimizer subclass and custom loss must survive a save/load
    # cycle when supplied via custom_objects.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      # test with custom optimizer, loss

      class CustomOp(keras.optimizers.RMSprop):
        pass

      def custom_loss(y_true, y_pred):
        return keras.losses.mse(y_true, y_pred)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])

      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      model = keras.models.load_model(
          fname,
          custom_objects={'CustomOp': CustomOp,
                          'custom_loss': custom_loss})
      os.remove(fname)

      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_fuctional_model_saving(self):
    # NOTE(review): method name has a typo ('fuctional' -> 'functional');
    # kept as-is so the test keeps its discovered identity.
    # Same save/reload round trip as above, for the functional Model API.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      inputs = keras.layers.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      output = keras.layers.Dense(3)(x)

      model = keras.models.Model(inputs, output)
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      model = keras.models.load_model(fname)
      os.remove(fname)

      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_saving_without_compilation(self):
    # Saving must work for a model even before/without training state.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)
      model = keras.models.load_model(fname)
      os.remove(fname)

  def test_saving_right_after_compilation(self):
    # Saving immediately after the training function is built (but before
    # any training) must also round-trip cleanly.
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
      model.model._make_train_function()

      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)
      model = keras.models.load_model(fname)
      os.remove(fname)
class TestSequential(test.TestCase):
  """Most Sequential model API tests are covered in `training_test.py`.
  """

  def test_sequential_pop(self):
    # pop() must remove the last layer, and the truncated model must be
    # recompilable and trainable against the new output shape.
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2
    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
      model.add(keras.layers.Dense(num_classes))
      model.compile(loss='mse', optimizer='sgd')
      x = np.random.random((batch_size, input_dim))
      y = np.random.random((batch_size, num_classes))
      model.fit(x, y, epochs=1)

      model.pop()
      self.assertEqual(len(model.layers), 1)
      self.assertEqual(model.output_shape, (None, num_hidden))

      model.compile(loss='mse', optimizer='sgd')
      y = np.random.random((batch_size, num_hidden))
      model.fit(x, y, epochs=1)
# Run through the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  test.main()
|
wangyum/tensorflow
|
tensorflow/contrib/keras/python/keras/models_test.py
|
Python
|
apache-2.0
| 6,256
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError, DownloadFailedError
from ..language import Language, language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..utils import to_unicode
from ..videos import Episode, Movie
import gzip
import logging
import os.path
import xmlrpclib
from sickbeard import logger as glog
logger = logging.getLogger("subliminal")
class OpenSubtitles(ServiceBase):
    """Subtitle service backed by the OpenSubtitles XML-RPC API."""
    server_url = 'http://api.opensubtitles.org/xml-rpc'
    site_url = 'http://www.opensubtitles.org'
    # API-driven service (no page scraping).
    api_based = True
    # Source: http://www.opensubtitles.org/addons/export_languages.php
    languages = language_set(['aar', 'abk', 'ace', 'ach', 'ada', 'ady', 'afa', 'afh', 'afr', 'ain', 'aka', 'akk',
                              'alb', 'ale', 'alg', 'alt', 'amh', 'ang', 'apa', 'ara', 'arc', 'arg', 'arm', 'arn',
                              'arp', 'art', 'arw', 'asm', 'ast', 'ath', 'aus', 'ava', 'ave', 'awa', 'aym', 'aze',
                              'bad', 'bai', 'bak', 'bal', 'bam', 'ban', 'baq', 'bas', 'bat', 'bej', 'bel', 'bem',
                              'ben', 'ber', 'bho', 'bih', 'bik', 'bin', 'bis', 'bla', 'bnt', 'bos', 'bra', 'bre',
                              'btk', 'bua', 'bug', 'bul', 'bur', 'byn', 'cad', 'cai', 'car', 'cat', 'cau', 'ceb',
                              'cel', 'cha', 'chb', 'che', 'chg', 'chi', 'chk', 'chm', 'chn', 'cho', 'chp', 'chr',
                              'chu', 'chv', 'chy', 'cmc', 'cop', 'cor', 'cos', 'cpe', 'cpf', 'cpp', 'cre', 'crh',
                              'crp', 'csb', 'cus', 'cze', 'dak', 'dan', 'dar', 'day', 'del', 'den', 'dgr', 'din',
                              'div', 'doi', 'dra', 'dua', 'dum', 'dut', 'dyu', 'dzo', 'efi', 'egy', 'eka', 'ell',
                              'elx', 'eng', 'enm', 'epo', 'est', 'ewe', 'ewo', 'fan', 'fao', 'fat', 'fij', 'fil',
                              'fin', 'fiu', 'fon', 'fre', 'frm', 'fro', 'fry', 'ful', 'fur', 'gaa', 'gay', 'gba',
                              'gem', 'geo', 'ger', 'gez', 'gil', 'gla', 'gle', 'glg', 'glv', 'gmh', 'goh', 'gon',
                              'gor', 'got', 'grb', 'grc', 'grn', 'guj', 'gwi', 'hai', 'hat', 'hau', 'haw', 'heb',
                              'her', 'hil', 'him', 'hin', 'hit', 'hmn', 'hmo', 'hrv', 'hun', 'hup', 'iba', 'ibo',
                              'ice', 'ido', 'iii', 'ijo', 'iku', 'ile', 'ilo', 'ina', 'inc', 'ind', 'ine', 'inh',
                              'ipk', 'ira', 'iro', 'ita', 'jav', 'jpn', 'jpr', 'jrb', 'kaa', 'kab', 'kac', 'kal',
                              'kam', 'kan', 'kar', 'kas', 'kau', 'kaw', 'kaz', 'kbd', 'kha', 'khi', 'khm', 'kho',
                              'kik', 'kin', 'kir', 'kmb', 'kok', 'kom', 'kon', 'kor', 'kos', 'kpe', 'krc', 'kro',
                              'kru', 'kua', 'kum', 'kur', 'kut', 'lad', 'lah', 'lam', 'lao', 'lat', 'lav', 'lez',
                              'lim', 'lin', 'lit', 'lol', 'loz', 'ltz', 'lua', 'lub', 'lug', 'lui', 'lun', 'luo',
                              'lus', 'mac', 'mad', 'mag', 'mah', 'mai', 'mak', 'mal', 'man', 'mao', 'map', 'mar',
                              'mas', 'may', 'mdf', 'mdr', 'men', 'mga', 'mic', 'min', 'mkh', 'mlg', 'mlt', 'mnc',
                              'mni', 'mno', 'moh', 'mon', 'mos', 'mun', 'mus', 'mwl', 'mwr', 'myn', 'myv', 'nah',
                              'nai', 'nap', 'nau', 'nav', 'nbl', 'nde', 'ndo', 'nds', 'nep', 'new', 'nia', 'nic',
                              'niu', 'nno', 'nob', 'nog', 'non', 'nor', 'nso', 'nub', 'nwc', 'nya', 'nym', 'nyn',
                              'nyo', 'nzi', 'oci', 'oji', 'ori', 'orm', 'osa', 'oss', 'ota', 'oto', 'paa', 'pag',
                              'pal', 'pam', 'pan', 'pap', 'pau', 'peo', 'per', 'phi', 'phn', 'pli', 'pol', 'pon',
                              'por', 'pra', 'pro', 'pus', 'que', 'raj', 'rap', 'rar', 'roa', 'roh', 'rom', 'rum',
                              'run', 'rup', 'rus', 'sad', 'sag', 'sah', 'sai', 'sal', 'sam', 'san', 'sas', 'sat',
                              'scn', 'sco', 'sel', 'sem', 'sga', 'sgn', 'shn', 'sid', 'sin', 'sio', 'sit', 'sla',
                              'slo', 'slv', 'sma', 'sme', 'smi', 'smj', 'smn', 'smo', 'sms', 'sna', 'snd', 'snk',
                              'sog', 'som', 'son', 'sot', 'spa', 'srd', 'srp', 'srr', 'ssa', 'ssw', 'suk', 'sun',
                              'sus', 'sux', 'swa', 'swe', 'syr', 'tah', 'tai', 'tam', 'tat', 'tel', 'tem', 'ter',
                              'tet', 'tgk', 'tgl', 'tha', 'tib', 'tig', 'tir', 'tiv', 'tkl', 'tlh', 'tli', 'tmh',
                              'tog', 'ton', 'tpi', 'tsi', 'tsn', 'tso', 'tuk', 'tum', 'tup', 'tur', 'tut', 'tvl',
                              'twi', 'tyv', 'udm', 'uga', 'uig', 'ukr', 'umb', 'urd', 'uzb', 'vai', 'ven', 'vie',
                              'vol', 'vot', 'wak', 'wal', 'war', 'was', 'wel', 'wen', 'wln', 'wol', 'xal', 'xho',
                              'yao', 'yap', 'yid', 'yor', 'ypk', 'zap', 'zen', 'zha', 'znd', 'zul', 'zun',
                              'pob', 'rum-MD'])
    # Bidirectional mapping between legacy OpenSubtitles codes ('mol',
    # 'scc') and the Language objects they correspond to.
    language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'),
                    Language('rum-MD'): 'mol', Language('srp'): 'scc'}
    language_code = 'alpha3'
    videos = [Episode, Movie]
    require_video = False
    # Match kinds from most to least reliable; drives the confidence score
    # computed in query().
    confidence_order = ['moviehash', 'imdbid', 'fulltext']

    def __init__(self, config=None):
        super(OpenSubtitles, self).__init__(config)
        # XML-RPC proxy to the API; self.token is filled in by init().
        self.server = xmlrpclib.ServerProxy(self.server_url)
        self.token = None

    def init(self):
        """Log in anonymously to the API and store the session token.

        Raises ServiceError when the server does not answer '200 OK'.
        """
        super(OpenSubtitles, self).init()
        result = self.server.LogIn('', '', 'eng', self.user_agent)
        if result['status'] != '200 OK':
            raise ServiceError('Login failed')
        self.token = result['token']

    def terminate(self):
        """Log out of the API session, if one was established."""
        super(OpenSubtitles, self).terminate()
        if self.token:
            self.server.LogOut(self.token)

    def query(self, filepath, languages, moviehash=None, size=None, imdbid=None, query=None):
        """Search for subtitles and return a list of ResultSubtitle.

        At least one of (moviehash AND size), imdbid, or query must be
        given; each supplied criterion becomes a separate search entry.
        Raises ServiceError if no criterion was provided.
        """
        searches = []
        if moviehash and size:
            searches.append({'moviehash': moviehash, 'moviebytesize': size})
        if imdbid:
            searches.append({'imdbid': imdbid})
        if query:
            searches.append({'query': query})
        if not searches:
            raise ServiceError('One or more parameter missing')
        for search in searches:
            search['sublanguageid'] = ','.join(self.get_code(l) for l in languages)
        logger.debug(u'Getting subtitles %r with token %s' % (searches, self.token))
        glog.log(u'Searching Subtitles on Opensubtitles with hash : %s' % (moviehash))
        results = self.server.SearchSubtitles(self.token, searches)
        if not results['data']:
            logger.debug(u'Could not find subtitles for %r with token %s' % (searches, self.token))
            return []
        subtitles = []
        for result in results['data']:
            language = self.get_language(result['SubLanguageID'])
            path = get_subtitle_path(filepath, language, self.config.multi)
            # Linear confidence: earlier entries in confidence_order score
            # higher; 'moviehash' matches get the highest value.
            confidence = 1 - float(self.confidence_order.index(result['MatchedBy'])) / float(len(self.confidence_order))
            subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), result['SubDownloadLink'],
                                      release=to_unicode(result['SubFileName']), confidence=confidence)
            subtitles.append(subtitle)
        return subtitles

    def list_checked(self, video, languages):
        """Query with the best available strategy for *video*.

        Preference order: local-file hash+size, then IMDb id, then a
        full-text query on the series name (Episode) or title (Movie).
        """
        results = []
        if video.exists:
            results = self.query(video.path or video.release, languages, moviehash=video.hashes['OpenSubtitles'], size=str(video.size))
        elif video.imdbid:
            results = self.query(video.path or video.release, languages, imdbid=video.imdbid)
        elif isinstance(video, Episode):
            results = self.query(video.path or video.release, languages, query=video.series)
        elif isinstance(video, Movie):
            results = self.query(video.path or video.release, languages, query=video.title)
        return results

    def download(self, subtitle):
        """Download the gzipped subtitle and decompress it to subtitle.path.

        On any failure the partially written file is removed and
        DownloadFailedError is raised; the temporary .gz is always
        cleaned up.
        """
        # TODO: Use OpenSubtitles DownloadSubtitles method
        try:
            self.download_file(subtitle.link, subtitle.path + '.gz')
            with open(subtitle.path, 'wb') as dump:
                gz = gzip.open(subtitle.path + '.gz')
                dump.write(gz.read())
                gz.close()
        except Exception as e:
            if os.path.exists(subtitle.path):
                os.remove(subtitle.path)
            raise DownloadFailedError(str(e))
        finally:
            if os.path.exists(subtitle.path + '.gz'):
                os.remove(subtitle.path + '.gz')
        return subtitle
# Module-level alias through which subliminal's service loader discovers
# this implementation.
Service = OpenSubtitles
|
Branlala/docker-sickbeardfr
|
sickbeard/lib/subliminal/services/opensubtitles.py
|
Python
|
mit
| 9,619
|
# -*- coding: utf-8 -*-
"""
Unit tests for preference APIs.
"""
import datetime
import ddt
import unittest
from mock import patch
from pytz import UTC
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from dateutil.parser import parse as parse_datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ...accounts.api import create_account
from ...errors import UserNotFound, UserNotAuthorized, PreferenceValidationError, PreferenceUpdateError
from ...models import UserProfile, UserOrgTag
from ...preferences.api import (
get_user_preference, get_user_preferences, set_user_preference, update_user_preferences, delete_user_preference,
update_email_opt_in
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Account APIs are only supported in LMS')
class TestPreferenceAPI(TestCase):
    """
    These tests specifically cover the parts of the API methods that are not covered by test_views.py.
    This includes the specific types of error raised, and default behavior when optional arguments
    are not specified.
    """
    # Shared password for all users created in setUp.
    password = "test"

    def setUp(self):
        super(TestPreferenceAPI, self).setUp()
        self.user = UserFactory.create(password=self.password)
        self.different_user = UserFactory.create(password=self.password)
        self.staff_user = UserFactory(is_staff=True, password=self.password)
        # Real DB row whose username is changed in memory only, so API calls
        # that look the name up in the database fail with UserNotFound.
        self.no_such_user = UserFactory.create(password=self.password)
        self.no_such_user.username = "no_such_user"
        self.test_preference_key = "test_key"
        self.test_preference_value = "test_value"
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)

    def test_get_user_preference(self):
        """
        Verifies the basic behavior of get_user_preference.
        """
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            self.test_preference_value
        )
        # Staff may read another user's preference by passing username.
        self.assertEqual(
            get_user_preference(self.staff_user, self.test_preference_key, username=self.user.username),
            self.test_preference_value
        )

    def test_get_user_preference_errors(self):
        """
        Verifies that get_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            get_user_preference(self.user, self.test_preference_key, username="no_such_user")
        with self.assertRaises(UserNotFound):
            get_user_preference(self.no_such_user, self.test_preference_key)
        # A non-staff user may not read someone else's preferences.
        with self.assertRaises(UserNotAuthorized):
            get_user_preference(self.different_user, self.test_preference_key, username=self.user.username)

    def test_get_user_preferences(self):
        """
        Verifies the basic behavior of get_user_preferences.
        """
        expected_user_preferences = {
            self.test_preference_key: self.test_preference_value,
        }
        self.assertEqual(get_user_preferences(self.user), expected_user_preferences)
        self.assertEqual(get_user_preferences(self.staff_user, username=self.user.username), expected_user_preferences)

    def test_get_user_preferences_errors(self):
        """
        Verifies that get_user_preferences returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.user, username="no_such_user")
        with self.assertRaises(UserNotFound):
            get_user_preferences(self.no_such_user)
        with self.assertRaises(UserNotAuthorized):
            get_user_preferences(self.different_user, username=self.user.username)

    def test_set_user_preference(self):
        """
        Verifies the basic behavior of set_user_preference.
        """
        # Unicode keys/values must round-trip unchanged.
        test_key = u'ⓟⓡⓔⓕⓔⓡⓔⓝⓒⓔ_ⓚⓔⓨ'
        test_value = u'ǝnןɐʌ_ǝɔuǝɹǝɟǝɹd'
        set_user_preference(self.user, test_key, test_value)
        self.assertEqual(get_user_preference(self.user, test_key), test_value)
        set_user_preference(self.user, test_key, "new_value", username=self.user.username)
        self.assertEqual(get_user_preference(self.user, test_key), "new_value")

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_set_user_preference_errors(self, user_preference_save):
        """
        Verifies that set_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            set_user_preference(self.user, self.test_preference_key, "new_value", username="no_such_user")
        with self.assertRaises(UserNotFound):
            set_user_preference(self.no_such_user, self.test_preference_key, "new_value")
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.staff_user, self.test_preference_key, "new_value", username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            set_user_preference(self.different_user, self.test_preference_key, "new_value", username=self.user.username)
        # Keys longer than the model's 255-char field fail validation.
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            set_user_preference(self.user, too_long_key, "new_value")
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # None, empty and whitespace-only values are all rejected.
        for empty_value in (None, "", " "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                set_user_preference(self.user, self.test_preference_key, empty_value)
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # A failing model save surfaces as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            set_user_preference(self.user, u"new_key_ȻħȺɍłɇs", u"new_value_ȻħȺɍłɇs")
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'new_key_ȻħȺɍłɇs' with value 'new_value_ȻħȺɍłɇs'."
        )

    def test_update_user_preferences(self):
        """
        Verifies the basic behavior of update_user_preferences.
        """
        # NOTE(review): despite the name, this test never calls
        # update_user_preferences -- both updates below go through
        # set_user_preference, and expected_user_preferences is unused.
        # Looks like a copy/paste oversight; confirm intent before relying
        # on this test for update_user_preferences coverage.
        expected_user_preferences = {
            self.test_preference_key: "new_value",
        }
        set_user_preference(self.user, self.test_preference_key, "new_value")
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )
        set_user_preference(self.user, self.test_preference_key, "new_value", username=self.user.username)
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )

    def test_update_user_preferences_with_username(self):
        """
        Verifies the basic behavior of update_user_preferences when passed
        username string.
        """
        update_data = {
            self.test_preference_key: "new_value"
        }
        update_user_preferences(self.user, update_data, user=self.user.username)
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )

    def test_update_user_preferences_with_user(self):
        """
        Verifies the basic behavior of update_user_preferences when passed
        user object.
        """
        update_data = {
            self.test_preference_key: "new_value"
        }
        update_user_preferences(self.user, update_data, user=self.user)
        self.assertEqual(
            get_user_preference(self.user, self.test_preference_key),
            "new_value"
        )

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    @patch('openedx.core.djangoapps.user_api.models.UserPreference.save')
    def test_update_user_preferences_errors(self, user_preference_save, user_preference_delete):
        """
        Verifies that set_user_preferences returns appropriate errors.
        """
        update_data = {
            self.test_preference_key: "new_value"
        }
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.user, update_data, user="no_such_user")
        with self.assertRaises(UserNotFound):
            update_user_preferences(self.no_such_user, update_data)
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.staff_user, update_data, user=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            update_user_preferences(self.different_user, update_data, user=self.user.username)
        # Oversized key fails validation (same limit as set_user_preference).
        too_long_key = "x" * 256
        with self.assertRaises(PreferenceValidationError) as context_manager:
            update_user_preferences(self.user, {too_long_key: "new_value"})
        errors = context_manager.exception.preference_errors
        self.assertEqual(len(errors.keys()), 1)
        self.assertEqual(
            errors[too_long_key],
            {
                "developer_message": get_expected_validation_developer_message(too_long_key, "new_value"),
                "user_message": get_expected_key_error_user_message(too_long_key, "new_value"),
            }
        )
        # Empty/whitespace values fail validation; None instead means
        # "delete this preference" and is exercised further below.
        for empty_value in ("", " "):
            with self.assertRaises(PreferenceValidationError) as context_manager:
                update_user_preferences(self.user, {self.test_preference_key: empty_value})
            errors = context_manager.exception.preference_errors
            self.assertEqual(len(errors.keys()), 1)
            self.assertEqual(
                errors[self.test_preference_key],
                {
                    "developer_message": get_empty_preference_message(self.test_preference_key),
                    "user_message": get_empty_preference_message(self.test_preference_key),
                }
            )
        # A failing model save surfaces as PreferenceUpdateError.
        user_preference_save.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: "new_value"})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Save failed for user preference 'test_key' with value 'new_value': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Save failed for user preference 'test_key' with value 'new_value'."
        )
        # A failing delete (value=None path) also surfaces as
        # PreferenceUpdateError.
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            update_user_preferences(self.user, {self.test_preference_key: None})
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )

    def test_delete_user_preference(self):
        """
        Verifies the basic behavior of delete_user_preference.
        """
        self.assertTrue(delete_user_preference(self.user, self.test_preference_key))
        set_user_preference(self.user, self.test_preference_key, self.test_preference_value)
        self.assertTrue(delete_user_preference(self.user, self.test_preference_key, username=self.user.username))
        # Deleting a key that does not exist returns False, not an error.
        self.assertFalse(delete_user_preference(self.user, "no_such_key"))

    @patch('openedx.core.djangoapps.user_api.models.UserPreference.delete')
    def test_delete_user_preference_errors(self, user_preference_delete):
        """
        Verifies that delete_user_preference returns appropriate errors.
        """
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.user, self.test_preference_key, username="no_such_user")
        with self.assertRaises(UserNotFound):
            delete_user_preference(self.no_such_user, self.test_preference_key)
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.staff_user, self.test_preference_key, username=self.user.username)
        with self.assertRaises(UserNotAuthorized):
            delete_user_preference(self.different_user, self.test_preference_key, username=self.user.username)
        # A failing model delete surfaces as PreferenceUpdateError.
        user_preference_delete.side_effect = [Exception, None]
        with self.assertRaises(PreferenceUpdateError) as context_manager:
            delete_user_preference(self.user, self.test_preference_key)
        self.assertEqual(
            context_manager.exception.developer_message,
            u"Delete failed for user preference 'test_key': "
        )
        self.assertEqual(
            context_manager.exception.user_message,
            u"Delete failed for user preference 'test_key'."
        )
@ddt.ddt
class UpdateEmailOptInTests(ModuleStoreTestCase):
    """
    Test cases to cover API-driven email list opt-in update workflows
    """
    USERNAME = u'frank-underwood'
    PASSWORD = u'ṕáśśẃőŕd'
    EMAIL = u'frank+underwood@example.com'

    @ddt.data(
        # Check that a 27 year old can opt-in
        (27, True, u"True"),
        # Check that a 32-year old can opt-out
        (32, False, u"False"),
        # Check that someone 14 years old can opt-in
        (14, True, u"True"),
        # Check that someone 13 years old cannot opt-in (must have turned 13 before this year)
        (13, True, u"False"),
        # Check that someone 12 years old cannot opt-in
        (12, True, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_update_email_optin(self, age, option, expected_result):
        """Verify the opt-in choice is honored or refused based on minimum age."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth. Use a timezone-aware "now" (UTC) for consistency
        # with test_change_email_optin below, which already does so.
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        year_of_birth = datetime.datetime.now(UTC).year - age
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def test_update_email_optin_no_age_set(self):
        """The API should still work if no age is specified (opt-in allowed)."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        user = User.objects.get(username=self.USERNAME)
        update_email_opt_in(user, course.id.org, True)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, u"True")

    def test_update_email_optin_anonymous_user(self):
        """Verify that the API raises an exception for a user with no profile."""
        course = CourseFactory.create()
        no_profile_user, __ = User.objects.get_or_create(username="no_profile_user", password=self.PASSWORD)
        with self.assertRaises(UserNotFound):
            update_email_opt_in(no_profile_user, course.id.org, True)

    @ddt.data(
        # Check that a 27 year old can opt-in, then out.
        (27, True, False, u"False"),
        # Check that a 32-year old can opt-out, then in.
        (32, False, True, u"True"),
        # Check that someone 13 years old can opt-in, then out.
        (13, True, False, u"False"),
        # Check that someone 12 years old cannot opt-in, then explicitly out.
        (12, True, False, u"False")
    )
    @ddt.unpack
    @override_settings(EMAIL_OPTIN_MINIMUM_AGE=13)
    def test_change_email_optin(self, age, option, second_option, expected_result):
        """Verify that a second update overrides the first opt-in choice."""
        # Create the course and account.
        course = CourseFactory.create()
        create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        # Set year of birth
        user = User.objects.get(username=self.USERNAME)
        profile = UserProfile.objects.get(user=user)
        year_of_birth = datetime.datetime.now(UTC).year - age
        profile.year_of_birth = year_of_birth
        profile.save()
        update_email_opt_in(user, course.id.org, option)
        update_email_opt_in(user, course.id.org, second_option)
        result_obj = UserOrgTag.objects.get(user=user, org=course.id.org, key='email-optin')
        self.assertEqual(result_obj.value, expected_result)

    def _assert_is_datetime(self, timestamp):
        """
        Return True if `timestamp` parses as a datetime, False otherwise
        (including for empty/falsy values).
        """
        if not timestamp:
            return False
        try:
            parse_datetime(timestamp)
        except ValueError:
            return False
        else:
            return True
def get_expected_validation_developer_message(preference_key, preference_value):
    """
    Return the expected developer message for a preference key that fails
    the 255-character model validation.
    """
    validation_errors = {
        "key": [u"Ensure this value has at most 255 characters (it has 256)."]
    }
    return u"Value '{preference_value}' not valid for preference '{preference_key}': {error}".format(
        preference_key=preference_key,
        preference_value=preference_value,
        error=validation_errors,
    )
def get_expected_key_error_user_message(preference_key, preference_value):
    """
    Return the user-facing message shown for an invalid preference key.

    `preference_value` is accepted for signature symmetry with the
    developer-message helper but does not appear in the message.
    """
    return u"Invalid user preference key '{key}'.".format(key=preference_key)
def get_empty_preference_message(preference_key):
    """
    Return the validation message shown when a preference is set to an
    empty value.
    """
    return "Preference '%s' cannot be set to an empty value." % preference_key
|
simbs/edx-platform
|
openedx/core/djangoapps/user_api/preferences/tests/test_api.py
|
Python
|
agpl-3.0
| 18,922
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from binascii import hexlify, unhexlify
from cStringIO import StringIO
class DecodeScriptTest(BitcoinTestFramework):
    """Tests decoding scripts via RPC command "decodescript"."""

    def setup_chain(self):
        """Create a fresh, single-node chain for this test."""
        print('Initializing test directory ' + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self, split=False):
        """Start one node; this test needs no network split."""
        self.nodes = start_nodes(1, self.options.tmpdir)
        self.is_network_split = False

    def decodescript_script_sig(self):
        """Decode the scriptSig side of each standard transaction type."""
        signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        push_signature = '48' + signature
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        push_public_key = '21' + public_key

        # below are test cases for all of the standard transaction types

        # 1) P2PK scriptSig
        # the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
        rpc_result = self.nodes[0].decodescript(push_signature)
        assert_equal(signature, rpc_result['asm'])

        # 2) P2PKH scriptSig
        rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
        assert_equal(signature + ' ' + public_key, rpc_result['asm'])

        # 3) multisig scriptSig
        # this also tests the leading portion of a P2SH multisig scriptSig
        # OP_0 <A sig> <B sig>
        rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
        assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])

        # 4) P2SH scriptSig
        # an empty P2SH redeemScript is valid and makes for a very simple test case.
        # thus, such a spending scriptSig would just need to pass the outer redeemScript
        # hash test and leave true on the top of the stack.
        rpc_result = self.nodes[0].decodescript('5100')
        assert_equal('1 0', rpc_result['asm'])

        # 5) null data scriptSig - no such thing because null data scripts can not be spent.
        # thus, no test case for that standard transaction type is here.

    def decodescript_script_pub_key(self):
        """Decode the scriptPubKey side of each standard transaction type."""
        public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
        push_public_key = '21' + public_key
        public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
        push_public_key_hash = '14' + public_key_hash

        # below are test cases for all of the standard transaction types

        # 1) P2PK scriptPubKey
        # <pubkey> OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
        assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])

        # 2) P2PKH scriptPubKey
        # OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
        rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
        assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])

        # 3) multisig scriptPubKey
        # <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
        assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])

        # 4) P2SH scriptPubKey
        # OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
        # push_public_key_hash here should actually be the hash of a redeem script.
        # but this works the same for purposes of this test.
        rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
        assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])

        # 5) null data scriptPubKey
        # use a signature look-alike here to make sure that we do not decode random data as a signature.
        # this matters if/when signature sighash decoding comes along.
        # would want to make sure that no such decoding takes place in this case.
        signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
        # OP_RETURN <data>
        rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
        assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])

        # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
        # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
        # just imagine that the pub keys used below are different.
        # for our purposes here it does not matter that they are the same even though it is unrealistic.
        #
        # OP_IF
        #   <receiver-pubkey> OP_CHECKSIGVERIFY
        # OP_ELSE
        #   <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
        # OP_ENDIF
        # <sender-pubkey> OP_CHECKSIG
        #
        # lock until block 500,000
        rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
        assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])

    def decoderawtransaction_asm_sighashtype(self):
        """Tests decoding scripts via RPC command "decoderawtransaction".

        This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
        """
        # this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
        tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])

        # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
        # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
        # verify that we have not altered scriptPubKey decoding.
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
        assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
        assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
        txSave = CTransaction()
        txSave.deserialize(StringIO(unhexlify(tx)))

        # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
        tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])

        # verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
        tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
        rpc_result = self.nodes[0].decoderawtransaction(tx)
        assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
        assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])

        # some more full transaction tests of varying specific scriptSigs. used instead of
        # tests in decodescript_script_sig because the decodescript RPC is specifically
        # for working on scriptPubKeys (argh!).
        push_signature = hexlify(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
        signature = push_signature[2:]
        der_signature = signature[:-2]
        signature_sighash_decoded = der_signature + '[ALL]'
        signature_2 = der_signature + '82'
        push_signature_2 = '48' + signature_2
        signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'

        # 1) P2PK scriptSig
        txSave.vin[0].scriptSig = unhexlify(push_signature)
        rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
        assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # make sure that the sighash decodes come out correctly for a more complex / lesser used case.
        txSave.vin[0].scriptSig = unhexlify(push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
        assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 2) multisig scriptSig
        txSave.vin[0].scriptSig = unhexlify('00' + push_signature + push_signature_2)
        rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
        assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])

        # 3) test a scriptSig that contains more than push operations.
        # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
        txSave.vin[0].scriptSig = unhexlify('6a143011020701010101010101020601010101010101')
        rpc_result = self.nodes[0].decoderawtransaction(hexlify(txSave.serialize()))
        assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])

    def run_test(self):
        self.decodescript_script_sig()
        self.decodescript_script_pub_key()
        self.decoderawtransaction_asm_sighashtype()


if __name__ == '__main__':
    DecodeScriptTest().main()
|
mainconceptx/DAS
|
qa/rpc-tests/decodescript.py
|
Python
|
mit
| 13,647
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
from custom_logger import CustomLogger
from extensions_paths import EXAMPLES
from file_system_util import CreateURLsFromPaths
from future import All, Future
from render_servlet import RenderServlet
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer
# Maps a refresh target name to (file system path, request path prefix).
_SUPPORTED_TARGETS = {
  'examples': (EXAMPLES, 'extensions/examples'),
}

# Module-level logger tagged 'render_refresher'.
_log = CustomLogger('render_refresher')
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
  '''RenderServlet delegate that always hands back one fixed ServerInstance
  instead of creating a new one per request.'''
  def __init__(self, server_instance):
    self._server_instance = server_instance
  def CreateServerInstance(self):
    return self._server_instance
def _RequestEachItem(title, items, request_callback):
  '''Runs a task |request_callback| named |title| for each item in |items|.
  |request_callback| must take an item and return a servlet response.
  Returns true if every item was successfully run, false if any return a
  non-200 response or raise an exception.
  '''
  # Fix: traceback.format_exc() was used below without importing traceback,
  # so any render failure raised NameError inside the except handler.
  import traceback
  _log.info('%s: starting', title)
  success_count, failure_count = 0, 0
  timer = Timer()
  try:
    for i, item in enumerate(items):
      def error_message(detail):
        # e.g. "title: error rendering foo (3 of 10): <detail>".
        return '%s: error rendering %s (%s of %s): %s' % (
            title, item, i + 1, len(items), detail)
      try:
        response = request_callback(item)
        if response.status == 200:
          success_count += 1
        else:
          _log.error(error_message('response status %s' % response.status))
          failure_count += 1
      except Exception as e:
        _log.error(error_message(traceback.format_exc()))
        failure_count += 1
        # NOTE(review): IsDeadlineExceededError is not imported in this
        # file's visible imports either -- confirm it is provided elsewhere.
        if IsDeadlineExceededError(e): raise
  finally:
    # Always log the summary, even when re-raising a deadline error.
    _log.info('%s: rendered %s of %s with %s failures in %s',
              title, success_count, len(items), failure_count,
              timer.Stop().FormatElapsed())
  return success_count == len(items)
class RenderRefresher(object):
  '''Used to refresh any set of renderable resources. Currently only supports
  assets related to extensions examples.'''
  def __init__(self, server_instance, request):
    # |request| is retained so refresh requests can reuse its host/headers.
    self._server_instance = server_instance
    self._request = request
  def Refresh(self):
    # Renders every supported target and returns a Future over all results.
    def render(path):
      # NOTE(review): Request is not among this file's visible imports --
      # confirm it is available in this module before relying on this path.
      request = Request(path, self._request.host, self._request.headers)
      delegate = _SingletonRenderServletDelegate(self._server_instance)
      return RenderServlet(request, delegate).Get()
    def request_files_in_dir(path, prefix='', strip_ext=None):
      '''Requests every file found under |path| in this host file system, with
      a request prefix of |prefix|. |strip_ext| is an optional list of file
      extensions that should be stripped from paths before requesting.
      '''
      def maybe_strip_ext(name):
        # The site verification file must keep its exact name to be served.
        if name == SITE_VERIFICATION_FILE or not strip_ext:
          return name
        base, ext = posixpath.splitext(name)
        return base if ext in strip_ext else name
      # NOTE(review): master_fs is not defined anywhere in this file; this
      # looks like a latent NameError. Confirm the intended file system
      # (perhaps obtained from self._server_instance) before use.
      files = [maybe_strip_ext(name)
               for name, _ in CreateURLsFromPaths(master_fs, path, prefix)]
      return _RequestEachItem(path, files, render)
    return All(request_files_in_dir(dir, prefix=prefix)
               for dir, prefix in _SUPPORTED_TARGETS.itervalues())
|
js0701/chromium-crosswalk
|
chrome/common/extensions/docs/server2/render_refresher.py
|
Python
|
bsd-3-clause
| 3,318
|
# This module's entire body is a raise: importing it always fails.
# Presumably used to exercise an agent loader's handling of modules that
# blow up at import time -- confirm against the consuming test harness.
raise TypeError
|
rodsol/opencog
|
tests/cython/agents/test_agent_w_load_exception.py
|
Python
|
agpl-3.0
| 16
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces gyp files in tree with files from here that
make the build use system libraries.
"""
import optparse
import os.path
import shutil
import sys
# Maps a gyp define flag (use_system_*) to the in-tree gyp file that gets
# replaced with the system-library variant shipped next to this script.
REPLACEMENTS = {
  'use_system_expat': 'third_party/expat/expat.gyp',
  'use_system_ffmpeg': 'third_party/ffmpeg/ffmpeg.gyp',
  'use_system_flac': 'third_party/flac/flac.gyp',
  'use_system_harfbuzz': 'third_party/harfbuzz-ng/harfbuzz.gyp',
  'use_system_icu': 'third_party/icu/icu.gyp',
  'use_system_jsoncpp': 'third_party/jsoncpp/jsoncpp.gyp',
  'use_system_libevent': 'third_party/libevent/libevent.gyp',
  'use_system_libjpeg': 'third_party/libjpeg/libjpeg.gyp',
  'use_system_libpng': 'third_party/libpng/libpng.gyp',
  'use_system_libusb': 'third_party/libusb/libusb.gyp',
  'use_system_libvpx': 'third_party/libvpx/libvpx.gyp',
  'use_system_libwebp': 'third_party/libwebp/libwebp.gyp',
  'use_system_libxml': 'third_party/libxml/libxml.gyp',
  'use_system_libxnvctrl' : 'third_party/libXNVCtrl/libXNVCtrl.gyp',
  'use_system_libxslt': 'third_party/libxslt/libxslt.gyp',
  'use_system_openssl': 'third_party/boringssl/boringssl.gyp',
  'use_system_opus': 'third_party/opus/opus.gyp',
  'use_system_protobuf': 'third_party/protobuf/protobuf.gyp',
  'use_system_re2': 'third_party/re2/re2.gyp',
  'use_system_snappy': 'third_party/snappy/snappy.gyp',
  'use_system_speex': 'third_party/speex/speex.gyp',
  'use_system_sqlite': 'third_party/sqlite/sqlite.gyp',
  'use_system_v8': 'v8/tools/gyp/v8.gyp',
  'use_system_zlib': 'third_party/zlib/zlib.gyp',
}


def DoMain(argv):
  """Replace (or restore, with --undo) in-tree gyp files with the
  system-library variants selected by '-D use_system_<lib>=1' defines.

  Returns 0 so the value can be used directly as an exit status.
  """
  my_dirname = os.path.dirname(__file__)
  source_tree_root = os.path.abspath(
      os.path.join(my_dirname, '..', '..', '..'))

  parser = optparse.OptionParser()
  # Accept arguments in gyp command-line syntax, so that the caller can re-use
  # command-line for this script and gyp.
  parser.add_option('-D', dest='defines', action='append')
  parser.add_option('--undo', action='store_true')
  options, args = parser.parse_args(argv)

  # options.defines is None when no -D flag was passed (optparse 'append'
  # default); fall back to an empty list so the membership test below does
  # not raise TypeError.
  defines = options.defines or []
  for flag, path in REPLACEMENTS.items():
    if '%s=1' % flag not in defines:
      continue
    if options.undo:
      # Restore original file, and also remove the backup.
      # This is meant to restore the source tree to its original state.
      os.rename(os.path.join(source_tree_root, path + '.orig'),
                os.path.join(source_tree_root, path))
    else:
      # Create a backup copy for --undo.
      shutil.copyfile(os.path.join(source_tree_root, path),
                      os.path.join(source_tree_root, path + '.orig'))
      # Copy the gyp file from directory of this script to target path.
      shutil.copyfile(os.path.join(my_dirname, os.path.basename(path)),
                      os.path.join(source_tree_root, path))
  return 0


if __name__ == '__main__':
  sys.exit(DoMain(sys.argv))
|
s20121035/rk3288_android5.1_repo
|
external/chromium_org/build/linux/unbundle/replace_gyp_files.py
|
Python
|
gpl-3.0
| 2,992
|
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_fwobj_ippool
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
    """
    Load the JSON fixture file matching this test module's name.

    Returns a one-element list with the parsed fixture data, or an empty
    list when the fixture file does not exist.
    """
    # Build "<this_dir>/fixtures/<module_name>.json". The template must
    # contain a {filename} placeholder for the format() call to have any
    # effect; without it the computed module name was silently dropped.
    fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
        filename=os.path.splitext(os.path.basename(__file__))[0])
    try:
        with open(fixture_path, "r") as fixture_file:
            fixture_data = json.load(fixture_file)
    except IOError:
        # Missing fixture file: tests parameterized on this simply get none.
        return []
    return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
    """Automatically patch AnsibleModule for every test in this module."""
    return mocker.patch('ansible.module_utils.basic.AnsibleModule')
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Automatically patch the module's Connection class for every test."""
    return mocker.patch('ansible.modules.network.fortimanager.fmgr_fwobj_ippool.Connection')
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
    """Return the fixture entry keyed by the requesting test's base name."""
    fixture_key = request.function.__name__.replace("test_", "")
    return request.param.get(fixture_key, None)
# NOTE(review): connection_mock and module_mock here are the pytest fixture
# *functions* themselves, not the mock objects pytest would inject into a
# test -- confirm FortiManagerHandler merely stores these references.
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_fmgr_fwobj_ippool_modify(fixture_data, mocker):
    """
    Replay each recorded fixture request through fmgr_fwobj_ippool_modify
    and verify every response reports a success status code of 0.
    """
    mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
                 side_effect=fixture_data)
    # The fixture file records eight request/response pairs. Indexing
    # explicitly (rather than iterating fixture_data) preserves the original
    # IndexError should the fixture file ever be truncated.
    for index in range(8):
        output = fmgr_fwobj_ippool.fmgr_fwobj_ippool_modify(fmg_instance, fixture_data[index]['paramgram_used'])
        assert output['raw_response']['status']['code'] == 0
|
kustodian/ansible
|
test/units/modules/network/fortimanager/test_fmgr_fwobj_ippool.py
|
Python
|
gpl-3.0
| 3,844
|
#!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role assocation to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
argument_spec = openstack_full_argument_spec(
role=dict(required=True),
user=dict(required=False),
group=dict(required=False),
project=dict(required=False),
domain=dict(required=False),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
required_one_of=[
['user', 'group']
])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
# role grant/revoke API introduced in 1.5.0
if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
module.fail_json(msg='shade 1.5.0 or higher is required for this module')
role = module.params.pop('role')
user = module.params.pop('user')
group = module.params.pop('group')
project = module.params.pop('project')
domain = module.params.pop('domain')
state = module.params.pop('state')
try:
cloud = shade.operator_cloud(**module.params)
filters = {}
r = cloud.get_role(role)
if r is None:
module.fail_json(msg="Role %s is not valid" % role)
filters['role'] = r['id']
if user:
u = cloud.get_user(user)
if u is None:
module.fail_json(msg="User %s is not valid" % user)
filters['user'] = u['id']
if group:
g = cloud.get_group(group)
if g is None:
module.fail_json(msg="Group %s is not valid" % group)
filters['group'] = g['id']
if domain:
d = cloud.get_domain(domain)
if d is None:
module.fail_json(msg="Domain %s is not valid" % domain)
filters['domain'] = d['id']
if project:
if domain:
p = cloud.get_project(project, domain_id=filters['domain'])
else:
p = cloud.get_project(project)
if p is None:
module.fail_json(msg="Project %s is not valid" % project)
filters['project'] = p['id']
assignment = cloud.list_role_assignments(filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, assignment))
changed = False
if state == 'present':
if not assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.grant_role(role, **kwargs)
changed = True
elif state == 'absent':
if assignment:
kwargs = _build_kwargs(user, group, project, domain)
cloud.revoke_role(role, **kwargs)
changed=True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# Legacy Ansible module-snippet imports: these star-imports are expanded by
# the module packager and must appear at module scope so AnsibleModule and
# the openstack helpers used by main() are in scope at call time.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
|
calamityman/ansible-modules-extras
|
cloud/openstack/os_user_role.py
|
Python
|
gpl-3.0
| 6,200
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as create_flow
import openstack_dashboard.dashboards.project.data_processing.utils. \
workflow_helpers as wf_helpers
LOG = logging.getLogger(__name__)
class CopyClusterTemplate(create_flow.ConfigureClusterTemplate):
    """Workflow that pre-seeds the cluster-template creation form with an
    existing template's data, so the user can save a modified copy.
    """
    success_message = _("Cluster Template copy %s created")
    entry_point = "generalconfigaction"
    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        """Fetch the source template and seed every workflow step from it.

        ``context_seed`` must carry the ``template_id`` of the template
        being copied. Any fetch failure is reported via horizon's
        ``exceptions.handle`` rather than propagated.
        """
        template_id = context_seed["template_id"]
        try:
            template = saharaclient.cluster_template_get(request, template_id)
            self._set_configs_to_copy(template.cluster_configs)
            # request.GET is normally immutable; copy it before injecting the
            # plugin parameters that the parent workflow reads from GET.
            request.GET = request.GET.copy()
            request.GET.update({"plugin_name": template.plugin_name,
                                "hadoop_version": template.hadoop_version,
                                "aa_groups": template.anti_affinity})
            super(CopyClusterTemplate, self).__init__(request, context_seed,
                                                      entry_point, *args,
                                                      **kwargs)
            # Initialize node groups.
            # TODO(rdopieralski) The same (or very similar) code appears
            # multiple times in this dashboard. It should be refactored to
            # a function.
            for step in self.steps:
                if isinstance(step, create_flow.ConfigureNodegroups):
                    ng_action = step.action
                    template_ngs = template.node_groups
                    # 'forms_ids' in POST means the node-group form was
                    # already submitted; keep the user's values instead of
                    # re-seeding from the template.
                    if 'forms_ids' in request.POST:
                        continue
                    ng_action.groups = []
                    for i, templ_ng in enumerate(template_ngs):
                        group_name = "group_name_%d" % i
                        template_id = "template_id_%d" % i
                        count = "count_%d" % i
                        serialized = "serialized_%d" % i
                        # save the original node group with all its fields in
                        # case the template id is missing
                        serialized_val = base64.urlsafe_b64encode(
                            json.dumps(wf_helpers.clean_node_group(templ_ng)))
                        ng = {
                            "name": templ_ng["name"],
                            "count": templ_ng["count"],
                            "id": i,
                            "deletable": "true",
                            "serialized": serialized_val
                        }
                        if "node_group_template_id" in templ_ng:
                            ng["template_id"] = templ_ng[
                                "node_group_template_id"]
                        ng_action.groups.append(ng)
                        wf_helpers.build_node_group_fields(
                            ng_action, group_name, template_id, count,
                            serialized)
                elif isinstance(step, create_flow.GeneralConfig):
                    # Pre-fill the general step with "<name>-copy".
                    fields = step.action.fields
                    fields["cluster_template_name"].initial = (
                        template.name + "-copy")
                    fields["description"].initial = template.description
        except Exception:
            exceptions.handle(request,
                              _("Unable to fetch template to copy."))
|
Daniex/horizon
|
openstack_dashboard/dashboards/project/data_processing/cluster_templates/workflows/copy.py
|
Python
|
apache-2.0
| 4,189
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class Issue131(Framework.TestCase):  # https://github.com/jacquev6/PyGithub/pull/133
    """Regression tests for pull-request heads owned by an organization."""
    def setUp(self):
        Framework.TestCase.setUp(self)
        self.user = self.g.get_user()
        self.repo = self.g.get_user("openmicroscopy").get_repo("ome-documentation")
    def testGetPullWithOrgHeadUser(self):
        head_user = self.repo.get_pull(204).head.user
        self.assertEqual(head_user.login, 'imcf')
        self.assertEqual(head_user.type, 'Organization')
        self.assertEqual(head_user.__class__.__name__, 'NamedUser')  # Should be Organization
    def testGetPullsWithOrgHeadUser(self):
        found = False
        for pull in self.repo.get_pulls('closed'):
            if pull.number != 204:
                continue
            found = True
            user = pull.head.user
            self.assertEqual(user, None)
            # Should be:
            # self.assertEqual(user.login, 'imcf')
            # self.assertEqual(user.type, 'Organization')
            # self.assertEqual(user.__class__.__name__, 'NamedUser') # Should be Organization
            break
        # Fail if pull #204 never showed up in the listing.
        self.assertTrue(found)
|
ARMmbed/yotta_osx_installer
|
workspace/lib/python2.7/site-packages/github/tests/Issue131.py
|
Python
|
apache-2.0
| 2,739
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
def save_checkpoint(prefix, epoch, arg_params, aux_params):
    """Checkpoint the model data into file.

    :param prefix: Prefix of model name.
    :param epoch: The epoch number of the model.
    :param arg_params: dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    :param aux_params: dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.
    :return: None
    prefix-epoch.params will be saved for parameters.
    """
    # Prefix each key so weights ('arg:') and auxiliary states ('aux:')
    # can be told apart when the checkpoint is loaded back.
    tagged = {}
    for name, array in arg_params.items():
        tagged['arg:%s' % name] = array
    for name, array in aux_params.items():
        tagged['aux:%s' % name] = array
    mx.nd.save('%s-%04d.params' % (prefix, epoch), tagged)
|
Mega-DatA-Lab/mxnet
|
example/rcnn/rcnn/utils/save_model.py
|
Python
|
apache-2.0
| 1,548
|
#! /usr/bin/env python
# A Python program implementing rmt, an application for remotely
# controlling other Tk applications.
# Cf. Ousterhout, Tcl and the Tk Toolkit, Figs. 27.5-8, pp. 273-276.
# Note that because of forward references in the original, we
# sometimes delay bindings until after the corresponding procedure is
# defined. We also introduce names for some unnamed code blocks in
# the original because of restrictions on lambda forms in Python.
# XXX This should be written in a more Python-like style!!!
from Tkinter import *
import sys
# 1. Create basic application structure: menu bar on top of
# text widget, scrollbar on right.
root = Tk()
# Raw Tcl interpreter handle; used directly by invoke() below for
# 'info complete' and 'eval'.
tk = root.tk
mBar = Frame(root, relief=RAISED, borderwidth=2)
mBar.pack(fill=X)
f = Frame(root)
f.pack(expand=1, fill=BOTH)
s = Scrollbar(f, relief=FLAT)
s.pack(side=RIGHT, fill=Y)
t = Text(f, relief=RAISED, borderwidth=2, yscrollcommand=s.set, setgrid=1)
t.pack(side=LEFT, fill=BOTH, expand=1)
# The 'bold' tag is used to highlight the prompt text (see prompt()).
t.tag_config('bold', font='-Adobe-Courier-Bold-R-Normal-*-120-*')
s['command'] = t.yview
root.title('Tk Remote Controller')
root.iconname('Tk Remote')
# 2. Create menu button and menus.
# NOTE(review): 'file' shadows the Python 2 built-in of the same name;
# harmless here, but rename if this demo is ever modernized.
file = Menubutton(mBar, text='File', underline=0)
file.pack(side=LEFT)
file_m = Menu(file)
file['menu'] = file_m
file_m_apps = Menu(file_m, tearoff=0)
file_m.add_cascade(label='Select Application', underline=0,
                   menu=file_m_apps)
file_m.add_command(label='Quit', underline=0, command=sys.exit)
# 3. Create bindings for text widget to allow commands to be
# entered and information to be selected. New characters
# can only be added at the end of the text (can't ever move
# insertion point).
def single1(e):
    # Button-1 press: anchor a character-wise selection at the click point.
    x = e.x
    y = e.y
    t.setvar('tk_priv(selectMode)', 'char')
    t.mark_set('anchor', At(x, y))
    # Should focus W
t.bind('<1>', single1)
def double1(e):
    # Double-click: extend the selection word-wise.
    x = e.x
    y = e.y
    t.setvar('tk_priv(selectMode)', 'word')
    t.tk_textSelectTo(At(x, y))
t.bind('<Double-1>', double1)
def triple1(e):
    # Triple-click: extend the selection line-wise.
    x = e.x
    y = e.y
    t.setvar('tk_priv(selectMode)', 'line')
    t.tk_textSelectTo(At(x, y))
t.bind('<Triple-1>', triple1)
def returnkey(e):
    # Return: terminate the input line and try to run the command.
    t.insert(AtInsert(), '\n')
    invoke()
t.bind('<Return>', returnkey)
def controlv(e):
    # Control-v: paste the selection; if it ends at column 0 (i.e. the
    # pasted text ended with a newline), try to run the command.
    t.insert(AtInsert(), t.selection_get())
    t.yview_pickplace(AtInsert())
    if t.index(AtInsert())[-2:] == '.0':
        invoke()
t.bind('<Control-v>', controlv)
# 4. Procedure to backspace over one character, as long as
# the character isn't part of the prompt.
def backspace(e):
    if t.index('promptEnd') != t.index('insert - 1 char'):
        t.delete('insert - 1 char', AtInsert())
        t.yview_pickplace(AtInsert())
t.bind('<BackSpace>', backspace)
t.bind('<Control-h>', backspace)
t.bind('<Delete>', backspace)
# 5. Procedure that's invoked when return is typed: if
# there's not yet a complete command (e.g. braces are open)
# then do nothing. Otherwise, execute command (locally or
# remotely), output the result or error message, and issue
# a new prompt.
def invoke():
    cmd = t.get('promptEnd + 1 char', AtInsert())
    # Ask Tcl whether the command is syntactically complete. # XXX
    if t.getboolean(tk.call('info', 'complete', cmd)):
        if app == root.winfo_name():
            # Talking to ourselves: evaluate in our own interpreter. # XXX
            msg = tk.call('eval', cmd)
        else:
            # Forward the command to the selected Tk application.
            msg = t.send(app, cmd)
        if msg:
            t.insert(AtInsert(), msg + '\n')
        prompt()
    t.yview_pickplace(AtInsert())
def prompt():
    # Emit "<app>: " and remember where it ends so editing can't cross it.
    t.insert(AtInsert(), app + ': ')
    t.mark_set('promptEnd', 'insert - 1 char')
    t.tag_add('bold', 'insert linestart', 'promptEnd')
# 6. Procedure to select a new application. Also changes
# the prompt on the current command line to reflect the new
# name.
def newApp(appName):
    global app
    app = appName
    t.delete('promptEnd linestart', 'promptEnd')
    t.insert('promptEnd', appName + ':')
    t.tag_add('bold', 'promptEnd linestart', 'promptEnd')
def fillAppsMenu():
    # Rebuilt each time the cascade is posted so the list stays current.
    # NOTE(review): the add('command') before delete() presumably guards
    # deleting from an empty menu — confirm against Tk semantics.
    file_m_apps.add('command')
    file_m_apps.delete(0, 'last')
    names = root.winfo_interps()
    names = list(names)
    names.sort()
    for name in names:
        try:
            # Probe the interpreter; dead ones raise TclError.
            root.send(name, 'winfo name .')
        except TclError:
            # Inoperative window -- ignore it
            pass
        else:
            file_m_apps.add_command(
                label=name,
                command=lambda name=name: newApp(name))
file_m_apps['postcommand'] = fillAppsMenu
mBar.tk_menuBar(file)
# 7. Miscellaneous initialization.
# Start out controlling ourselves, issue the first prompt, and enter
# the Tk event loop.
app = root.winfo_name()
prompt()
t.focus()
root.mainloop()
|
teeple/pns_server
|
work/install/Python-2.7.4/Demo/tkinter/guido/rmt.py
|
Python
|
gpl-2.0
| 4,492
|
# $Id$
import inc_sip as sip
import inc_sdp as sdp
# Some non-SIP URI's in Contact header
#
# The SUBSCRIBE below carries mailto:/http: URIs in its Contact headers;
# the 400 passed to SendtoCfg below is presumably the response code the
# test expects pjsua to send back — confirm against inc_sip.SendtoCfg.
complete_msg = \
"""SUBSCRIBE sip:localhost SIP/2.0
Via: SIP/2.0/UDP 192.168.0.14:5060;rport;branch=z9hG4bKPj9db9
Max-Forwards: 70
From: <sip:192.168.0.14>;tag=08cd5bfc2d8a4fddb1f5e59c6961d298
To: <sip:localhost>
Call-ID: 3373d9eb32aa458db7e69c7ea51e0bd7
CSeq: 0 SUBSCRIBE
Contact: mailto:dontspam@pjsip.org
Contact: <mailto:dontspam@pjsip.org>
Contact: http://www.pjsip.org/the%20path.cgi?pname=pvalue
Event: presence
Expires: 600
Accept: application/pidf+xml, application/xpidf+xml
Allow-Events: presence, refer
User-Agent: PJSUA v0.9.0-trunk/win32
Content-Length: 0
"""
# Run pjsua with null audio / auto-answer and send the raw message above.
sendto_cfg = sip.SendtoCfg( "Non SIP URI in Contact",
                            "--null-audio --auto-answer 200",
                            "", 400, complete_msg=complete_msg)
|
xiejianying/pjsip
|
tests/pjsua/scripts-sendto/364_non_sip_uri_subscribe.py
|
Python
|
gpl-2.0
| 809
|
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.utils.html import strip_tags
import json
from xmodule_django.models import CourseKeyField
class Note(models.Model):
    """A user's annotation of a span of course content."""
    user = models.ForeignKey(User, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    uri = models.CharField(max_length=255, db_index=True)
    text = models.TextField(default="")
    quote = models.TextField(default="")
    range_start = models.CharField(max_length=2048)  # xpath string
    range_start_offset = models.IntegerField()
    range_end = models.CharField(max_length=2048)  # xpath string
    range_end_offset = models.IntegerField()
    tags = models.TextField(default="")  # comma-separated string
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    updated = models.DateTimeField(auto_now=True, db_index=True)
    class Meta:
        app_label = 'notes'
    def clean(self, json_body):
        """
        Cleans the note object or raises a ValidationError.
        """
        if json_body is None:
            raise ValidationError('Note must have a body.')
        body = json.loads(json_body)
        if not isinstance(body, dict):
            raise ValidationError('Note body must be a dictionary.')
        # NOTE: all three of these fields should be considered user input
        # and may be output back to the user, so we need to sanitize them.
        # These fields should only contain _plain text_.
        for attr in ('uri', 'text', 'quote'):
            setattr(self, attr, strip_tags(body.get(attr, '')))
        ranges = body.get('ranges')
        if ranges is None or len(ranges) != 1:
            raise ValidationError('Note must contain exactly one range.')
        (selection,) = ranges
        self.range_start = selection['start']
        self.range_start_offset = selection['startOffset']
        self.range_end = selection['end']
        self.range_end_offset = selection['endOffset']
        # Tags are also user input; sanitize each before joining.
        cleaned_tags = [strip_tags(tag) for tag in body.get('tags', [])]
        self.tags = ",".join(cleaned_tags) if cleaned_tags else ""
    def get_absolute_url(self):
        """
        Returns the absolute url for the note object.
        """
        # pylint: disable=no-member
        url_args = {'course_id': self.course_id.to_deprecated_string(),
                    'note_id': str(self.pk)}
        return reverse('notes_api_note', kwargs=url_args)
    def as_dict(self):
        """
        Returns the note object as a dictionary.
        """
        selection = {
            'start': self.range_start,
            'startOffset': self.range_start_offset,
            'end': self.range_end,
            'endOffset': self.range_end_offset
        }
        return {
            'id': self.pk,
            'user_id': self.user.pk,
            'uri': self.uri,
            'text': self.text,
            'quote': self.quote,
            'ranges': [selection],
            'tags': self.tags.split(","),
            'created': str(self.created),
            'updated': str(self.updated)
        }
|
solashirai/edx-platform
|
lms/djangoapps/notes/models.py
|
Python
|
agpl-3.0
| 3,201
|
# sybase/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# pysybase is imported but not referenced below — presumably for its
# import-time registration side effects; confirm before removing.
from sqlalchemy.dialects.sybase import base, pysybase, pyodbc
# default dialect
base.dialect = pyodbc.dialect
from .base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
    TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
    BIGINT, INT, INTEGER, SMALLINT, BINARY,\
    VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\
    IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\
    dialect
# Public API: the Sybase column types plus the default dialect.
__all__ = (
    'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR',
    'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC',
    'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY',
    'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR',
    'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT',
    'dialect'
)
|
pcu4dros/pandora-core
|
workspace/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/__init__.py
|
Python
|
mit
| 894
|
#! /usr/bin/env python
# Copyright (c) 2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Giacomo Gabrielli
# Pipeline activity viewer for the O3 CPU model.
import optparse
import os
import sys
import copy
# Temporary storage for instructions. The queue is filled in out-of-order
# until it reaches 'max_threshold' number of instructions. It is then
# sorted out and instructions are printed out until their number drops to
# 'min_threshold'.
# It is assumed that the instructions are not out of order for more then
# 'min_threshold' places - otherwise they will appear out of order.
# Global state shared by process_trace/queue_inst/print_insts.
insts = {
    'queue': [] ,         # Instructions to print.
    'max_threshold':2000, # Instructions are sorted out and printed when
                          # their number reaches this threshold.
    'min_threshold':1000, # Printing stops when this number is reached.
    'sn_start':0,         # The first instruction seq. number to be printed.
    'sn_stop':0,          # The last instruction seq. number to be printed.
    'tick_start':0,       # The first tick to be printed
    'tick_stop':0,        # The last tick to be printed
    'tick_drift':2000,    # Used to calculate the start and the end of main
                          # loop. We assume here that the instructions are not
                          # out of order for more than 2000 CPU cycles; the
                          # value is rescaled to ticks (multiplied by
                          # cycle_time) inside process_trace().
    'only_committed':0,   # Set if only committed instructions are printed.
}
def process_trace(trace, outfile, cycle_time, width, color, timestamps,
    committed_only, store_completions, start_tick, stop_tick, start_sn, stop_sn):
    """Convert an O3PipeView trace into an ASCII pipeline timeline.

    :param trace: readable file object with 'O3PipeView:...' lines.
    :param outfile: writable file object receiving the rendered timelines.
    :param cycle_time: CPU cycle time in ticks.
    :param width: timeline width in cycles.
    :param color/timestamps/store_completions: rendering options.
    :param committed_only: skip instructions that never retired.
    :param start_tick/stop_tick, start_sn/stop_sn: region of interest
        bounds (0 means unbounded).
    """
    global insts
    insts['sn_start'] = start_sn
    insts['sn_stop'] = stop_sn
    insts['tick_start'] = start_tick
    insts['tick_stop'] = stop_tick
    # Convert the drift from cycles to ticks.
    insts['tick_drift'] = insts['tick_drift'] * cycle_time
    insts['only_committed'] = committed_only
    line = None
    fields = None
    # Skip lines up to the starting tick
    if start_tick != 0:
        while True:
            line = trace.readline()
            if not line: return
            fields = line.split(':')
            if fields[0] != 'O3PipeView': continue
            if int(fields[2]) >= start_tick: break
    elif start_sn != 0:
        while True:
            line = trace.readline()
            if not line: return
            fields = line.split(':')
            if fields[0] != 'O3PipeView': continue
            if fields[1] == 'fetch' and int(fields[5]) >= start_sn: break
    else:
        line = trace.readline()
        if not line: return
        fields = line.split(':')
    # Skip lines up to next instruction fetch
    while fields[0] != 'O3PipeView' or fields[1] != 'fetch':
        line = trace.readline()
        if not line: return
        fields = line.split(':')
    # Print header
    outfile.write('// f = fetch, d = decode, n = rename, p = dispatch, '
                  'i = issue, c = complete, r = retire')
    if store_completions:
        outfile.write(', s = store-complete')
    outfile.write('\n\n')
    outfile.write(' ' + 'timeline'.center(width) +
                  '   ' + 'tick'.center(15) +
                  '  ' + 'pc.upc'.center(12) +
                  '  ' + 'disasm'.ljust(25) +
                  '  ' + 'seq_num'.center(10))
    if timestamps:
        outfile.write('timestamps'.center(25))
    outfile.write('\n')
    # Region of interest
    curr_inst = {}
    while True:
        if fields[0] == 'O3PipeView':
            curr_inst[fields[1]] = int(fields[2])
            if fields[1] == 'fetch':
                if ((stop_tick > 0 and int(fields[2]) > stop_tick+insts['tick_drift']) or
                    (stop_sn > 0 and int(fields[5]) > (stop_sn+insts['max_threshold']))):
                    # Past the region of interest: flush everything queued.
                    # BUG FIX: the original call omitted 'store_completions',
                    # so print_insts() raised TypeError (6 args for 7 params)
                    # whenever a stop tick/sn bound was actually reached.
                    print_insts(outfile, cycle_time, width, color, timestamps,
                                store_completions, 0)
                    return
                (curr_inst['pc'], curr_inst['upc']) = fields[3:5]
                curr_inst['sn'] = int(fields[5])
                curr_inst['disasm'] = ' '.join(fields[6][:-1].split())
            elif fields[1] == 'retire':
                # retire tick 0 marks an abandoned (never committed) inst.
                if curr_inst['retire'] == 0:
                    curr_inst['disasm'] = '-----' + curr_inst['disasm']
                if store_completions:
                    curr_inst[fields[3]] = int(fields[4])
                queue_inst(outfile, curr_inst, cycle_time, width, color,
                           timestamps, store_completions)
        line = trace.readline()
        if not line:
            # End of trace: flush everything queued (threshold 0).
            print_insts(outfile, cycle_time, width, color, timestamps,
                        store_completions, 0)
            return
        fields = line.split(':')
#Sorts out instructions according to sequence number
def compare_by_sn(a, b):
    # cmp()-style comparator ordering instruction records by sequence
    # number: returns -1, 0 or 1 like the Python 2 built-in cmp().
    lhs, rhs = a['sn'], b['sn']
    return (lhs > rhs) - (lhs < rhs)
# Puts new instruction into the print queue.
# Sorts out and prints instructions when their number reaches threshold value
def queue_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
    # Deep-copy the record (the caller keeps mutating its dict) onto the
    # print queue; once the queue grows past max_threshold, sort it and
    # drain it down to min_threshold.
    global insts
    insts['queue'].append(copy.deepcopy(inst))
    if len(insts['queue']) > insts['max_threshold']:
        print_insts(outfile, cycle_time, width, color, timestamps,
                    store_completions, insts['min_threshold'])
# Sorts out and prints instructions in print queue
def print_insts(outfile, cycle_time, width, color, timestamps, store_completions, lower_threshold):
    """Sort the queued instructions by sequence number and print them
    until only ``lower_threshold`` remain queued (0 flushes everything).
    Instructions outside the configured sn/tick bounds are dropped.
    """
    global insts
    # Python 2 list.sort(cmpfunc) form.
    insts['queue'].sort(compare_by_sn)
    while len(insts['queue']) > lower_threshold:
        print_item=insts['queue'].pop(0)
        # As the instructions are processed out of order the main loop starts
        # earlier than specified by start_sn/tick and finishes later than what
        # is defined in stop_sn/tick.
        # Therefore, here we have to filter out instructions that reside out of
        # the specified boundaries.
        if (insts['sn_start'] > 0 and print_item['sn'] < insts['sn_start']):
            continue; # earlier than the starting sequence number
        if (insts['sn_stop'] > 0 and print_item['sn'] > insts['sn_stop']):
            continue; # later than the ending sequence number
        if (insts['tick_start'] > 0 and print_item['fetch'] < insts['tick_start']):
            continue; # earlier than the starting tick number
        if (insts['tick_stop'] > 0 and print_item['fetch'] > insts['tick_stop']):
            continue; # later than the ending tick number
        if (insts['only_committed'] != 0 and print_item['retire'] == 0):
            continue; # retire is set to zero if it hasn't been completed
        print_inst(outfile, print_item, cycle_time, width, color, timestamps, store_completions)
# Prints a single instruction
def print_inst(outfile, inst, cycle_time, width, color, timestamps, store_completions):
    """Render one instruction's pipeline timeline to ``outfile``.

    Relies on Python 2 integer division ('/' on ints) for tick-to-column
    arithmetic throughout.
    """
    if color:
        from m5.util.terminal import termcap
    else:
        from m5.util.terminal import no_termcap as termcap
    # Pipeline stages
    stages = [{'name': 'fetch',
               'color': termcap.Blue + termcap.Reverse,
               'shorthand': 'f'},
              {'name': 'decode',
               'color': termcap.Yellow + termcap.Reverse,
               'shorthand': 'd'},
              {'name': 'rename',
               'color': termcap.Magenta + termcap.Reverse,
               'shorthand': 'n'},
              {'name': 'dispatch',
               'color': termcap.Green + termcap.Reverse,
               'shorthand': 'p'},
              {'name': 'issue',
               'color': termcap.Red + termcap.Reverse,
               'shorthand': 'i'},
              {'name': 'complete',
               'color': termcap.Cyan + termcap.Reverse,
               'shorthand': 'c'},
              {'name': 'retire',
               'color': termcap.Blue + termcap.Reverse,
               'shorthand': 'r'}
              ]
    if store_completions:
        stages.append(
            {'name': 'store',
             'color': termcap.Yellow + termcap.Reverse,
             'shorthand': 's'})
    # Print
    time_width = width * cycle_time
    # Align the timeline's origin to a multiple of time_width.
    base_tick = (inst['fetch'] / time_width) * time_width
    # Find out the time of the last event - it may not
    # be 'retire' if the instruction is not completed.
    last_event_time = max(inst['fetch'], inst['decode'],inst['rename'],
        inst['dispatch'],inst['issue'], inst['complete'], inst['retire'])
    if store_completions:
        last_event_time = max(last_event_time, inst['store'])
    # Timeline shorter than time_width is printed in compact form where
    # the print continues at the start of the same line.
    if ((last_event_time - inst['fetch']) < time_width):
        num_lines = 1 # compact form
    else:
        num_lines = ((last_event_time - base_tick) / time_width) + 1
    curr_color = termcap.Normal
    # This will visually distinguish completed and abandoned instructions.
    if inst['retire'] == 0: dot = '=' # abandoned instruction
    else:                   dot = '.' # completed instruction
    for i in range(num_lines):
        start_tick = base_tick + i * time_width
        end_tick = start_tick + time_width
        if num_lines == 1:  # compact form
            end_tick += (inst['fetch'] - base_tick)
        # Collect the stage events that fall inside this output line.
        events = []
        for stage_idx in range(len(stages)):
            tick = inst[stages[stage_idx]['name']]
            if tick != 0:
                if tick >= start_tick and tick < end_tick:
                    events.append((tick % time_width,
                                   stages[stage_idx]['name'],
                                   stage_idx, tick))
        events.sort()
        outfile.write('[')
        pos = 0
        if num_lines == 1 and events[0][2] != 0:  # event is not fetch
            curr_color = stages[events[0][2] - 1]['color']
        for event in events:
            # Dispatch and issue at the same tick collapse into one marker.
            if (stages[event[2]]['name'] == 'dispatch' and
                inst['dispatch'] == inst['issue']):
                continue
            outfile.write(curr_color + dot * ((event[0] / cycle_time) - pos))
            outfile.write(stages[event[2]]['color'] +
                          stages[event[2]]['shorthand'])
            if event[3] != last_event_time:  # event is not the last one
                curr_color = stages[event[2]]['color']
            else:
                curr_color = termcap.Normal
            pos = (event[0] / cycle_time) + 1
        outfile.write(curr_color + dot * (width - pos) + termcap.Normal +
                      ']-(' + str(base_tick + i * time_width).rjust(15) + ') ')
        if i == 0:
            # First line also carries pc, disassembly and sequence number.
            outfile.write('%s.%s %s [%s]' % (
                inst['pc'].rjust(10),
                inst['upc'],
                inst['disasm'].ljust(25),
                str(inst['sn']).rjust(10)))
            if timestamps:
                outfile.write(' f=%s, r=%s' % (inst['fetch'], inst['retire']))
            outfile.write('\n')
        else:
            outfile.write('...'.center(12) + '\n')
def validate_range(my_range):
    """Parse a 'start:stop' range string into a two-element int list.

    Returns None for any malformed range: non-numeric parts, a part
    count other than two, a negative start, or start >= stop when stop
    is positive (-1 conventionally means "infinity" here).
    """
    try:
        bounds = [int(i) for i in my_range.split(':')]
    except ValueError:
        # Non-numeric input previously escaped as an uncaught ValueError;
        # report it as an invalid range instead.
        return None
    if (len(bounds) != 2 or
        bounds[0] < 0 or
        bounds[1] > 0 and bounds[0] >= bounds[1]):
        return None
    return bounds
def main():
    """Parse command-line options and render TRACE_FILE to the output file."""
    # Parse options
    usage = ('%prog [OPTION]... TRACE_FILE')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option(
        '-o',
        dest='outfile',
        default=os.path.join(os.getcwd(), 'o3-pipeview.out'),
        help="output file (default: '%default')")
    parser.add_option(
        '-t',
        dest='tick_range',
        default='0:-1',
        help="tick range (default: '%default'; -1 == inf.)")
    parser.add_option(
        '-i',
        dest='inst_range',
        default='0:-1',
        help="instruction range (default: '%default'; -1 == inf.)")
    parser.add_option(
        '-w',
        dest='width',
        type='int', default=80,
        help="timeline width (default: '%default')")
    parser.add_option(
        '--color',
        action='store_true', default=False,
        help="enable colored output (default: '%default')")
    parser.add_option(
        '-c', '--cycle-time',
        type='int', default=1000,
        help="CPU cycle time in ticks (default: '%default')")
    parser.add_option(
        '--timestamps',
        action='store_true', default=False,
        help="print fetch and retire timestamps (default: '%default')")
    parser.add_option(
        '--only_committed',
        action='store_true', default=False,
        help="display only committed (completed) instructions (default: '%default')")
    parser.add_option(
        '--store_completions',
        action='store_true', default=False,
        help="additionally display store completion ticks (default: '%default')")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error('incorrect number of arguments')
        sys.exit(1)  # NOTE(review): parser.error() already exits; unreachable.
    tick_range = validate_range(options.tick_range)
    if not tick_range:
        parser.error('invalid range')
        sys.exit(1)  # NOTE(review): unreachable, see above.
    inst_range = validate_range(options.inst_range)
    if not inst_range:
        parser.error('invalid range')
        sys.exit(1)  # NOTE(review): unreachable, see above.
    # Process trace
    print 'Processing trace... ',
    with open(args[0], 'r') as trace:
        with open(options.outfile, 'w') as out:
            process_trace(trace, out, options.cycle_time, options.width,
                          options.color, options.timestamps,
                          options.only_committed, options.store_completions,
                          *(tick_range + inst_range))
    print 'done!'
if __name__ == '__main__':
    # Make gem5's bundled python package importable (print_inst pulls in
    # m5.util.terminal for the termcap colors).
    sys.path.append(os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..', 'src', 'python'))
    main()
|
tiagormk/gem5-hmp
|
util/o3-pipeview.py
|
Python
|
bsd-3-clause
| 15,788
|
# Copyright 2010 Google Inc.
# All Rights Reserved.
#
# Author: Tim Haloun (thaloun@google.com)
# Daniel Petersson (dape@google.com)
#
import os
# Keep a global dictionary of library target params for lookups in
# ExtendComponent(). Presumably populated by ExtendComponent itself,
# which is defined outside this view — confirm before relying on it.
_all_lib_targets = {}
def _GenericLibrary(env, static, **kwargs):
  """Shared implementation behind Library() and DynamicLibrary().

  Forwards to swtoolkit's ComponentLibrary with COMPONENT_STATIC set
  from the *static* flag.

  Args:
    env: The environment object.
    static: True for a static library, False for a dynamic one.
    kwargs: The keyword arguments.

  Returns:
    See swtoolkit ComponentLibrary
  """
  merged = CombineDicts(kwargs, {'COMPONENT_STATIC': static})
  return ExtendComponent(env, 'ComponentLibrary', **merged)
def Library(env, **kwargs):
  """Builds a static library for all target platforms.

  Args:
    env: The current environment.
    kwargs: The keyword arguments.

  Returns:
    See swtoolkit ComponentLibrary
  """
  return _GenericLibrary(env, static=True, **kwargs)
def DynamicLibrary(env, **kwargs):
  """Builds a dynamic library for all target platforms.

  Args:
    env: The environment object.
    kwargs: The keyword arguments.

  Returns:
    See swtoolkit ComponentLibrary
  """
  return _GenericLibrary(env, static=False, **kwargs)
def Object(env, **kwargs):
  """Thin wrapper forwarding to swtoolkit's ComponentObject builder.

  Args:
    env: The current environment.
    kwargs: The keyword arguments.

  Returns:
    See swtoolkit ComponentObject
  """
  return ExtendComponent(env, 'ComponentObject', **kwargs)
def Unittest(env, **kwargs):
  """Extends ComponentTestProgram to support unittest built
  for multiple platforms.

  Args:
    env: The current environment.
    kwargs: The keyword arguments. 'name' is required and gets the
      '_unittest' suffix appended; pass 'explicit_libs' to suppress the
      default per-platform system library lists.

  Returns:
    See swtoolkit ComponentProgram.
  """
  kwargs['name'] = kwargs['name'] + '_unittest'
  common_test_params = {
    'posix_cppdefines': ['GUNIT_NO_GOOGLE3', 'GTEST_HAS_RTTI=0'],
    'libs': ['unittest_main', 'gunit']
  }
  # dict.has_key() is deprecated (and removed in Python 3); 'in' is the
  # equivalent, portable spelling.
  if 'explicit_libs' not in kwargs:
    common_test_params['win_libs'] = [
      'advapi32',
      'crypt32',
      'iphlpapi',
      'secur32',
      'shell32',
      'shlwapi',
      'user32',
      'wininet',
      'ws2_32'
    ]
    common_test_params['lin_libs'] = [
      'crypto',
      'pthread',
      'ssl',
    ]
  params = CombineDicts(kwargs, common_test_params)
  return ExtendComponent(env, 'ComponentTestProgram', **params)
def App(env, **kwargs):
  """Extends ComponentProgram to support executables with platform specific
  options.

  Args:
    env: The current environment.
    kwargs: The keyword arguments. Pass 'explicit_libs' to suppress the
      default Windows system library list.

  Returns:
    See swtoolkit ComponentProgram.
  """
  # dict.has_key() is deprecated (and removed in Python 3); 'in' is the
  # equivalent, portable spelling.
  if 'explicit_libs' not in kwargs:
    common_app_params = {
      'win_libs': [
        'advapi32',
        'crypt32',
        'iphlpapi',
        'secur32',
        'shell32',
        'shlwapi',
        'user32',
        'wininet',
        'ws2_32'
      ]}
    params = CombineDicts(kwargs, common_app_params)
  else:
    params = kwargs
  return ExtendComponent(env, 'ComponentProgram', **params)
def WiX(env, **kwargs):
  """Extends the WiX (Windows Installer XML) builder via ExtendComponent.

  Args:
    env: The current environment.
    kwargs: The keyword arguments.

  Returns:
    The node produced by the environment's wix builder
  """
  return ExtendComponent(env, 'WiX', **kwargs)
def Repository(env, at, path):
  """Maps a directory external to $MAIN_DIR to the given path so that sources
  compiled from it end up in the correct place under $OBJ_DIR. NOT required
  when only referring to header files.

  Args:
    env: The current environment object.
    at: The 'mount point' within the current directory.
    path: Path to the actual directory.
  """
  mount_point = env.Dir(at)
  mount_point.addRepository(env.Dir(path))
def Components(*paths):
    """Completes the directory paths with the correct file
    names such that the directory/directory.scons name
    convention can be used.

    Args:
        paths: The paths to complete.  A path that already names an
            existing file is passed through unchanged.

    Returns:
        The completed list of scons files that are needed to build talk.
    """
    return [p if os.path.isfile(p) else ExpandSconsPath(p) for p in paths]
def ExpandSconsPath(path):
    """Expands a directory path into the path to the
    scons file that our build uses.

    Ex: magicflute/plugin/common => magicflute/plugin/common/common.scons

    Args:
        path: The directory path to expand.

    Returns:
        The expanded path.
    """
    leaf = os.path.basename(path)
    return '{0}/{1}.scons'.format(path, leaf)
def AddMediaLibs(env, **kwargs):
    """Adds the LMI and GIPS media library directories and link libraries to
    kwargs, selected for the active platform.

    Args:
        env: The current environment; queried via Bit() for the platform and
            get() for coverage mode.
        kwargs: The keyword argument dictionary; 'libdirs', 'libs' and (on
            mac) 'FRAMEWORKS' are extended in place via AddToDict.

    Returns:
        kwargs, after the platform-specific media entries have been merged in.
    """
    # Pick the LMI library flavor for this platform; windows additionally
    # distinguishes the coverage (c_only) build from the normal release build.
    lmi_libdir = '$GOOGLE3/../googleclient/third_party/lmi/files/lib/'
    if env.Bit('windows'):
        if env.get('COVERAGE_ENABLED'):
            lmi_libdir += 'win32/c_only'
        else:
            lmi_libdir += 'win32/Release'
    elif env.Bit('mac'):
        lmi_libdir += 'macos'
    elif env.Bit('linux'):
        lmi_libdir += 'linux/x86'
    AddToDict(kwargs, 'libdirs', [
        '$MAIN_DIR/third_party/gips/Libraries/',
        lmi_libdir,
    ])
    # Select the GIPS voice engine variant; on windows the choice also
    # depends on the debug/release build flavor (mtd vs mt runtime).
    gips_lib = ''
    if env.Bit('windows'):
        if env.Bit('debug'):
            gips_lib = 'gipsvoiceenginelib_mtd'
        else:
            gips_lib = 'gipsvoiceenginelib_mt'
    elif env.Bit('mac'):
        gips_lib = 'VoiceEngine_mac_universal_gcc'
    elif env.Bit('linux'):
        gips_lib = 'VoiceEngine_Linux_gcc'
    # The common set of media libraries linked on every platform.
    AddToDict(kwargs, 'libs', [
        gips_lib,
        'LmiAudioCommon',
        'LmiClient',
        'LmiCmcp',
        'LmiDeviceManager',
        'LmiH263ClientPlugIn',
        'LmiH263CodecCommon',
        'LmiH263Decoder',
        'LmiH263Encoder',
        'LmiH264ClientPlugIn',
        'LmiH264CodecCommon',
        'LmiH264Common',
        'LmiH264Decoder',
        'LmiH264Encoder',
        'LmiIce',
        'LmiMediaPayload',
        'LmiOs',
        'LmiPacketCache',
        'LmiProtocolStack',
        'LmiRateShaper',
        'LmiRtp',
        'LmiSecurity',
        'LmiSignaling',
        'LmiStun',
        'LmiTransport',
        'LmiUi',
        'LmiUtils',
        'LmiVideoCommon',
        'LmiXml',
    ])
    # Extra windows system libraries needed by the media stack.
    if env.Bit('windows'):
        AddToDict(kwargs, 'libs', [
            'dsound',
            'd3d9',
            'gdi32',
            'strmiids',
        ])
    # On mac the equivalent system dependencies come in as frameworks.
    if env.Bit('mac'):
        AddToDict(kwargs, 'FRAMEWORKS', [
            'AudioToolbox',
            'AudioUnit',
            'Cocoa',
            'CoreAudio',
            'CoreFoundation',
            'IOKit',
            'QTKit',
            'QuickTime',
            'QuartzCore',
        ])
    return kwargs
def ReadVersion(filename):
    """Executes the supplied file and pulls out a version definition from it.

    Args:
        filename: Path of a python file assigning a comma-separated version
            string (e.g. "1,2,3,4") to a variable named 'version'.

    Returns:
        A dotted version string, or '0.0.0.0' when the file defines no
        version.  If GOOGLE_VERSION_BUILDNUMBER is set in the environment it
        replaces the last component.
    """
    defs = {}
    # exec(source, globals) replaces the Python-2-only execfile() builtin and
    # works under both Python 2 and 3; 'with' also guarantees the file handle
    # is closed.
    with open(str(filename)) as f:
        exec(f.read(), defs)
    if 'version' not in defs:
        return '0.0.0.0'
    version = defs['version']
    parts = version.split(',')
    build = os.environ.get('GOOGLE_VERSION_BUILDNUMBER')
    if build:
        parts[-1] = str(build)
    return '.'.join(parts)
#-------------------------------------------------------------------------------
# Helper methods for translating talk.Foo() declarations into manipulations of
# environment construction variables, including parameter parsing and merging.
#
def GetEntry(dict, key):
    """Pops a value out of a dictionary by key.

    Args:
        dict: The dictionary to look in.  (The name shadows the builtin; it
            is kept so existing keyword callers are unaffected.)
        key: The key whose value to fetch and remove.

    Returns:
        The removed value, or None if the key is missing.
    """
    # pop() with a default performs the lookup, removal and None fallback in
    # one step; the original has_key()/[]/pop() sequence needed three lookups
    # and has_key() no longer exists in Python 3.
    return dict.pop(key, None)
def MergeAndFilterByPlatform(env, params):
    """Take a dictionary of arguments to lists of values, and, depending on
    which platform we are targeting, merge the lists of associated keys.

    Merge by combining value lists like so:
        {win_foo = [a,b], lin_foo = [c,d], foo = [e], mac_bar = [f], bar = [g]}
        becomes {foo = [a,b,e], bar = [g]} on windows, and
        {foo = [e], bar = [f,g]} on mac

    Args:
        env: The hammer environment which knows which platforms are active.
        params: The keyword argument dictionary.

    Returns:
        A new dictionary with the filtered and combined entries of params.
    """
    platforms = {
        'linux': 'lin_',
        'mac': 'mac_',
        'posix': 'posix_',
        'windows': 'win_',
    }
    # Partition the prefixes by whether their platform is currently active.
    active_prefixes = [platforms[x] for x in platforms if env.Bit(x)]
    inactive_prefixes = [platforms[x] for x in platforms if not env.Bit(x)]
    merged = {}
    # items() replaces the Python-2-only iteritems().
    for arg, values in params.items():
        inactive_platform = False
        key = arg
        # Strip an active-platform prefix so e.g. 'win_libs' merges into
        # plain 'libs' on windows.
        for prefix in active_prefixes:
            if arg.startswith(prefix):
                key = arg[len(prefix):]
        # Drop entries belonging to a platform we are not building for.
        for prefix in inactive_prefixes:
            if arg.startswith(prefix):
                inactive_platform = True
        if inactive_platform:
            continue
        AddToDict(merged, key, values)
    return merged
# Linux can build both 32 and 64 bit on a 64 bit host, but a 32 bit host can
# only build 32 bit.  For the 32 bit debian installer a 32 bit host is
# required.  The ChromeOS (linux) ebuild doesn't support 64 bit and requires
# a 32 bit build only for now.
def Allow64BitCompile(env):
    """Returns whether this build may also produce 64 bit targets."""
    return env.Bit('linux') and env.Bit('platform_arch_64bit')
def MergeSettingsFromLibraryDependencies(env, params):
    """Merges into params any 'dependent_target_settings' declared by the
    library targets this target links against (as recorded in
    _all_lib_targets).

    Args:
        env: The hammer environment, used for platform filtering.
        params: The keyword argument dictionary; not modified in place.

    Returns:
        params, possibly replaced by a merged copy.
    """
    # 'in' membership replaces the Python-2-only dict.has_key(); .get()
    # collapses the double lookup into one.
    if 'libs' in params:
        for lib in params['libs']:
            lib_params = _all_lib_targets.get(lib)
            if lib_params is not None and 'dependent_target_settings' in lib_params:
                params = CombineDicts(
                    params,
                    MergeAndFilterByPlatform(
                        env,
                        lib_params['dependent_target_settings']))
    return params
def ExtendComponent(env, component, **kwargs):
    """A wrapper around a scons builder function that preprocesses and post-
    processes its inputs and outputs.  For example, it merges and filters
    certain keyword arguments before appending them to the environments
    construction variables.  It can build signed targets and 64bit copies
    of targets as well.

    Args:
        env: The hammer environment with which to build the target.
        component: The environment's builder function, e.g. ComponentProgram.
        kwargs: keyword arguments that are either merged, translated, and
            passed on to the call to component, or which control execution.
            TODO(): Document the fields, such as cppdefines->CPPDEFINES,
            prepend_includedirs, include_talk_media_libs, etc.

    Returns:
        The output node returned by the call to component, or a subsequent
        signed dependant node.
    """
    env = env.Clone()
    # prune parameters intended for other platforms, then merge
    params = MergeAndFilterByPlatform(env, kwargs)
    # get the 'target' field
    name = GetEntry(params, 'name')
    # save pristine params of lib targets for future reference
    if 'ComponentLibrary' == component:
        _all_lib_targets[name] = dict(params)
    # add any dependent target settings from library dependencies
    params = MergeSettingsFromLibraryDependencies(env, params)
    # if this is a signed binary we need to make an unsigned version first
    signed = env.Bit('windows') and GetEntry(params, 'signed')
    if signed:
        name = 'unsigned_' + name
    # add default values
    if GetEntry(params, 'include_talk_media_libs'):
        params = AddMediaLibs(env, **params)
    # potentially exit now
    srcs = GetEntry(params, 'srcs')
    if not srcs or not hasattr(env, component):
        return None
    # apply any explicit dependencies
    dependencies = GetEntry(params, 'depends')
    if dependencies is not None:
        env.Depends(name, dependencies)
    # put the contents of params into the environment
    # some entries are renamed then appended, others renamed then prepended
    appends = {
        'cppdefines': 'CPPDEFINES',
        'libdirs': 'LIBPATH',
        'link_flags': 'LINKFLAGS',
        'libs': 'LIBS',
        'FRAMEWORKS': 'FRAMEWORKS',
    }
    prepends = {}
    if env.Bit('windows'):
        # MSVC compile flags have precedence at the beginning ...
        prepends['ccflags'] = 'CCFLAGS'
    else:
        # ... while GCC compile flags have precedence at the end
        appends['ccflags'] = 'CCFLAGS'
    if GetEntry(params, 'prepend_includedirs'):
        prepends['includedirs'] = 'CPPPATH'
    else:
        appends['includedirs'] = 'CPPPATH'
    for field, var in appends.items():
        values = GetEntry(params, field)
        if values is not None:
            env.Append(**{var: values})
    for field, var in prepends.items():
        values = GetEntry(params, field)
        if values is not None:
            env.Prepend(**{var: values})
    # workaround for pulse stripping link flag for unknown reason
    if Allow64BitCompile(env):
        env['SHLINKCOM'] = ('$SHLINK -o $TARGET -m32 $SHLINKFLAGS $SOURCES '
                            '$_LIBDIRFLAGS $_LIBFLAGS')
        env['LINKCOM'] = ('$LINK -o $TARGET -m32 $LINKFLAGS $SOURCES '
                          '$_LIBDIRFLAGS $_LIBFLAGS')
    # any other parameters are replaced without renaming
    for field, value in params.items():
        env.Replace(**{field: value})
    # invoke the builder function
    builder = getattr(env, component)
    node = builder(name, srcs)
    # make a parallel 64bit version if requested
    if Allow64BitCompile(env) and GetEntry(params, 'also64bit'):
        env_64bit = env.Clone()
        env_64bit.FilterOut(CCFLAGS=['-m32'], LINKFLAGS=['-m32'])
        env_64bit.Prepend(CCFLAGS=['-m64', '-fPIC'], LINKFLAGS=['-m64'])
        name_64bit = name + '64'
        env_64bit.Replace(OBJSUFFIX='64' + env_64bit['OBJSUFFIX'])
        env_64bit.Replace(SHOBJSUFFIX='64' + env_64bit['SHOBJSUFFIX'])
        if ('ComponentProgram' == component or
            ('ComponentLibrary' == component and
             env_64bit['COMPONENT_STATIC'] == False)):
            # link 64 bit versions of libraries
            libs = []
            for lib in env_64bit['LIBS']:
                # 'in' membership replaces the Python-2-only has_key().
                if (lib in _all_lib_targets and
                    'also64bit' in _all_lib_targets[lib]):
                    libs.append(lib + '64')
                else:
                    libs.append(lib)
            env_64bit.Replace(LIBS=libs)
            env_64bit['SHLINKCOM'] = ('$SHLINK -o $TARGET -m64 $SHLINKFLAGS $SOURCES '
                                      '$_LIBDIRFLAGS $_LIBFLAGS')
            env_64bit['LINKCOM'] = ('$LINK -o $TARGET -m64 $LINKFLAGS $SOURCES '
                                    '$_LIBDIRFLAGS $_LIBFLAGS')
        builder = getattr(env_64bit, component)
        nodes = [node, builder(name_64bit, srcs)]
        return nodes
    if signed:  # Note currently incompatible with 64Bit flag
        # Get the name of the built binary, then get the name of the final
        # signed version from it. We need the output path since we don't know
        # the file extension beforehand.
        target = node[0].path.split('_', 1)[1]
        signed_node = env.SignedBinary(
            source=node,
            target='$STAGING_DIR/' + target,
        )
        env.Alias('signed_binaries', signed_node)
        return signed_node
    return node
def AddToDict(dictionary, key, values, append=True):
    """Merge the given key value(s) pair into a dictionary.  If it contains
    an entry with that key already, then combine by appending or prepending
    the values as directed.  Otherwise, assign a new keyvalue pair.

    Args:
        dictionary: The dict to update in place.
        key: The key to merge under.
        values: The list of values to merge; None is a no-op.
        append: When True, new values go after existing ones; else before.
    """
    if values is None:
        return
    # 'in' membership replaces the Python-2-only dict.has_key().
    if key not in dictionary:
        dictionary[key] = values
        return
    cur = dictionary[key]
    # TODO: Make sure that there are no duplicates
    # in the list. I can't use python set for this since
    # the nodes that are returned by the SCONS builders
    # are not hashable.
    # dictionary[key] = list(set(cur).union(set(values)))
    if append:
        dictionary[key] = cur + values
    else:
        dictionary[key] = values + cur
def CombineDicts(a, b):
    """Unions two dictionaries by combining values of keys shared between
    them.

    Note: keys shared with a are popped from b, so b is mutated by this call
    (a is left untouched).

    Args:
        a: First dictionary; read only.
        b: Second dictionary; shared keys are removed from it.

    Returns:
        A new dictionary holding the union.
    """
    combined = {}
    # 'in' membership replaces the Python-2-only dict.has_key().
    for key, value in a.items():
        if key in b:
            # Concatenate the value lists for keys present in both dicts.
            combined[key] = value + b.pop(key)
        else:
            combined[key] = value
    combined.update(b)
    return combined
def RenameKey(d, old, new, append=True):
    """Moves the entry stored under 'old' to 'new', merging it with any
    existing 'new' entry via AddToDict's append/prepend rules."""
    value = GetEntry(d, old)
    AddToDict(d, new, value, append)
|
opensourcechipspark/platform_external_chromium
|
third_party/libjingle/source/talk/site_scons/talk.py
|
Python
|
bsd-3-clause
| 15,628
|
from openid import message
from openid import oidutil
from openid.extensions import sreg
import urllib
import cgi
import unittest
def mkGetArgTest(ns, key, expected=None):
    """Builds a TestCase method checking Message.getArg for (ns, key).

    When expected is None the key is assumed absent: a supplied default must
    be returned, and no_default must raise KeyError.  Otherwise the expected
    value must be returned regardless of the default argument.
    """
    def test(self):
        sentinel = object()
        self.failUnlessEqual(self.msg.getArg(ns, key), expected)
        if expected is None:
            # Missing key: the default wins, and no_default raises.
            self.failUnlessEqual(
                self.msg.getArg(ns, key, sentinel), sentinel)
            self.failUnlessRaises(
                KeyError, self.msg.getArg, ns, key, message.no_default)
        else:
            # Present key: the stored value wins over any default.
            self.failUnlessEqual(
                self.msg.getArg(ns, key, sentinel), expected)
            self.failUnlessEqual(
                self.msg.getArg(ns, key, message.no_default), expected)
    return test
class EmptyMessageTest(unittest.TestCase):
    """Tests a Message() built with no arguments: all serializations are
    empty, and accessors that need a default OpenID namespace raise
    UndefinedOpenIDNamespace."""

    def setUp(self):
        self.msg = message.Message()

    def test_toPostArgs(self):
        self.failUnlessEqual(self.msg.toPostArgs(), {})

    def test_toArgs(self):
        self.failUnlessEqual(self.msg.toArgs(), {})

    def test_toKVForm(self):
        self.failUnlessEqual(self.msg.toKVForm(), '')

    def test_toURLEncoded(self):
        self.failUnlessEqual(self.msg.toURLEncoded(), '')

    def test_toURL(self):
        base_url = 'http://base.url/'
        self.failUnlessEqual(self.msg.toURL(base_url), base_url)

    def test_getOpenID(self):
        self.failUnlessEqual(self.msg.getOpenIDNamespace(), None)

    def test_getKeyOpenID(self):
        # Could reasonably return None instead of raising an
        # exception. I'm not sure which one is more right, since this
        # case should only happen when you're building a message from
        # scratch and so have no default namespace.
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.getKey, message.OPENID_NS, 'foo')

    def test_getKeyBARE(self):
        self.failUnlessEqual(self.msg.getKey(message.BARE_NS, 'foo'), 'foo')

    def test_getKeyNS1(self):
        self.failUnlessEqual(self.msg.getKey(message.OPENID1_NS, 'foo'), None)

    def test_getKeyNS2(self):
        self.failUnlessEqual(self.msg.getKey(message.OPENID2_NS, 'foo'), None)

    def test_getKeyNS3(self):
        self.failUnlessEqual(self.msg.getKey('urn:nothing-significant', 'foo'),
                             None)

    def test_hasKey(self):
        # Could reasonably return False instead of raising an
        # exception. I'm not sure which one is more right, since this
        # case should only happen when you're building a message from
        # scratch and so have no default namespace.
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.hasKey, message.OPENID_NS, 'foo')

    def test_hasKeyBARE(self):
        self.failUnlessEqual(self.msg.hasKey(message.BARE_NS, 'foo'), False)

    def test_hasKeyNS1(self):
        self.failUnlessEqual(self.msg.hasKey(message.OPENID1_NS, 'foo'), False)

    def test_hasKeyNS2(self):
        self.failUnlessEqual(self.msg.hasKey(message.OPENID2_NS, 'foo'), False)

    def test_hasKeyNS3(self):
        self.failUnlessEqual(self.msg.hasKey('urn:nothing-significant', 'foo'),
                             False)

    def test_getAliasedArgSuccess(self):
        msg = message.Message.fromPostArgs({'openid.ns.test': 'urn://foo',
                                            'openid.test.flub': 'bogus'})
        actual_uri = msg.getAliasedArg('ns.test', message.no_default)
        self.assertEquals("urn://foo", actual_uri)

    def test_getAliasedArgFailure(self):
        msg = message.Message.fromPostArgs({'openid.test.flub': 'bogus'})
        self.assertRaises(KeyError,
                          msg.getAliasedArg, 'ns.test', message.no_default)

    def test_getArg(self):
        # Could reasonably return None instead of raising an
        # exception. I'm not sure which one is more right, since this
        # case should only happen when you're building a message from
        # scratch and so have no default namespace.
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.getArg, message.OPENID_NS, 'foo')

    # getArg behavior for each namespace; all keys are absent here.
    test_getArgBARE = mkGetArgTest(message.BARE_NS, 'foo')
    test_getArgNS1 = mkGetArgTest(message.OPENID1_NS, 'foo')
    test_getArgNS2 = mkGetArgTest(message.OPENID2_NS, 'foo')
    test_getArgNS3 = mkGetArgTest('urn:nothing-significant', 'foo')

    def test_getArgs(self):
        # Could reasonably return {} instead of raising an
        # exception. I'm not sure which one is more right, since this
        # case should only happen when you're building a message from
        # scratch and so have no default namespace.
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.getArgs, message.OPENID_NS)

    def test_getArgsBARE(self):
        self.failUnlessEqual(self.msg.getArgs(message.BARE_NS), {})

    def test_getArgsNS1(self):
        self.failUnlessEqual(self.msg.getArgs(message.OPENID1_NS), {})

    def test_getArgsNS2(self):
        self.failUnlessEqual(self.msg.getArgs(message.OPENID2_NS), {})

    def test_getArgsNS3(self):
        self.failUnlessEqual(self.msg.getArgs('urn:nothing-significant'), {})

    def test_updateArgs(self):
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.updateArgs, message.OPENID_NS,
                              {'does not':'matter'})

    def _test_updateArgsNS(self, ns):
        # Helper: updateArgs into an initially-empty namespace.
        update_args = {
            'Camper van Beethoven':'David Lowery',
            'Magnolia Electric Co.':'Jason Molina',
            }
        self.failUnlessEqual(self.msg.getArgs(ns), {})
        self.msg.updateArgs(ns, update_args)
        self.failUnlessEqual(self.msg.getArgs(ns), update_args)

    def test_updateArgsBARE(self):
        self._test_updateArgsNS(message.BARE_NS)

    def test_updateArgsNS1(self):
        self._test_updateArgsNS(message.OPENID1_NS)

    def test_updateArgsNS2(self):
        self._test_updateArgsNS(message.OPENID2_NS)

    def test_updateArgsNS3(self):
        self._test_updateArgsNS('urn:nothing-significant')

    def test_setArg(self):
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.setArg, message.OPENID_NS,
                              'does not', 'matter')

    def _test_setArgNS(self, ns):
        # Helper: set then read back a single key in the given namespace.
        key = 'Camper van Beethoven'
        value = 'David Lowery'
        self.failUnlessEqual(self.msg.getArg(ns, key), None)
        self.msg.setArg(ns, key, value)
        self.failUnlessEqual(self.msg.getArg(ns, key), value)

    def test_setArgBARE(self):
        self._test_setArgNS(message.BARE_NS)

    def test_setArgNS1(self):
        self._test_setArgNS(message.OPENID1_NS)

    def test_setArgNS2(self):
        self._test_setArgNS(message.OPENID2_NS)

    def test_setArgNS3(self):
        self._test_setArgNS('urn:nothing-significant')

    def test_setArgToNone(self):
        self.failUnlessRaises(AssertionError, self.msg.setArg,
                              message.OPENID1_NS, 'op_endpoint', None)

    def test_delArg(self):
        # Could reasonably raise KeyError instead of raising
        # UndefinedOpenIDNamespace. I'm not sure which one is more
        # right, since this case should only happen when you're
        # building a message from scratch and so have no default
        # namespace.
        self.failUnlessRaises(message.UndefinedOpenIDNamespace,
                              self.msg.delArg, message.OPENID_NS, 'key')

    def _test_delArgNS(self, ns):
        # Helper: deleting an absent key raises KeyError.
        key = 'Camper van Beethoven'
        self.failUnlessRaises(KeyError, self.msg.delArg, ns, key)

    def test_delArgBARE(self):
        self._test_delArgNS(message.BARE_NS)

    def test_delArgNS1(self):
        self._test_delArgNS(message.OPENID1_NS)

    def test_delArgNS2(self):
        self._test_delArgNS(message.OPENID2_NS)

    def test_delArgNS3(self):
        self._test_delArgNS('urn:nothing-significant')

    def test_isOpenID1(self):
        self.failIf(self.msg.isOpenID1())

    def test_isOpenID2(self):
        self.failIf(self.msg.isOpenID2())
class OpenID1MessageTest(unittest.TestCase):
    """Tests a Message built from bare OpenID 1 post args (no openid.ns
    parameter), checking serialization and per-namespace accessors."""

    def setUp(self):
        self.msg = message.Message.fromPostArgs({'openid.mode':'error',
                                                 'openid.error':'unit test'})

    def test_toPostArgs(self):
        self.failUnlessEqual(self.msg.toPostArgs(),
                             {'openid.mode':'error',
                              'openid.error':'unit test'})

    def test_toArgs(self):
        self.failUnlessEqual(self.msg.toArgs(), {'mode':'error',
                                                 'error':'unit test'})

    def test_toKVForm(self):
        self.failUnlessEqual(self.msg.toKVForm(),
                             'error:unit test\nmode:error\n')

    def test_toURLEncoded(self):
        self.failUnlessEqual(self.msg.toURLEncoded(),
                             'openid.error=unit+test&openid.mode=error')

    def test_toURL(self):
        base_url = 'http://base.url/'
        actual = self.msg.toURL(base_url)
        actual_base = actual[:len(base_url)]
        self.failUnlessEqual(actual_base, base_url)
        self.failUnlessEqual(actual[len(base_url)], '?')
        query = actual[len(base_url) + 1:]
        parsed = cgi.parse_qs(query)
        self.failUnlessEqual(parsed, {'openid.mode':['error'],
                                      'openid.error':['unit test']})

    def test_getOpenID(self):
        self.failUnlessEqual(self.msg.getOpenIDNamespace(), message.OPENID1_NS)

    def test_getKeyOpenID(self):
        self.failUnlessEqual(self.msg.getKey(message.OPENID_NS, 'mode'),
                             'openid.mode')

    def test_getKeyBARE(self):
        self.failUnlessEqual(self.msg.getKey(message.BARE_NS, 'mode'), 'mode')

    def test_getKeyNS1(self):
        self.failUnlessEqual(
            self.msg.getKey(message.OPENID1_NS, 'mode'), 'openid.mode')

    def test_getKeyNS2(self):
        self.failUnlessEqual(self.msg.getKey(message.OPENID2_NS, 'mode'), None)

    def test_getKeyNS3(self):
        self.failUnlessEqual(
            self.msg.getKey('urn:nothing-significant', 'mode'), None)

    def test_hasKey(self):
        self.failUnlessEqual(self.msg.hasKey(message.OPENID_NS, 'mode'), True)

    def test_hasKeyBARE(self):
        self.failUnlessEqual(self.msg.hasKey(message.BARE_NS, 'mode'), False)

    def test_hasKeyNS1(self):
        self.failUnlessEqual(self.msg.hasKey(message.OPENID1_NS, 'mode'), True)

    def test_hasKeyNS2(self):
        self.failUnlessEqual(
            self.msg.hasKey(message.OPENID2_NS, 'mode'), False)

    def test_hasKeyNS3(self):
        self.failUnlessEqual(
            self.msg.hasKey('urn:nothing-significant', 'mode'), False)

    # getArg behavior: 'mode' is present in the OpenID 1 namespace only.
    test_getArgBARE = mkGetArgTest(message.BARE_NS, 'mode')
    test_getArgNS = mkGetArgTest(message.OPENID_NS, 'mode', 'error')
    test_getArgNS1 = mkGetArgTest(message.OPENID1_NS, 'mode', 'error')
    test_getArgNS2 = mkGetArgTest(message.OPENID2_NS, 'mode')
    test_getArgNS3 = mkGetArgTest('urn:nothing-significant', 'mode')

    def test_getArgs(self):
        self.failUnlessEqual(self.msg.getArgs(message.OPENID_NS),
                             {'mode':'error',
                              'error':'unit test',
                              })

    def test_getArgsBARE(self):
        self.failUnlessEqual(self.msg.getArgs(message.BARE_NS), {})

    def test_getArgsNS1(self):
        self.failUnlessEqual(self.msg.getArgs(message.OPENID1_NS),
                             {'mode':'error',
                              'error':'unit test',
                              })

    def test_getArgsNS2(self):
        self.failUnlessEqual(self.msg.getArgs(message.OPENID2_NS), {})

    def test_getArgsNS3(self):
        self.failUnlessEqual(self.msg.getArgs('urn:nothing-significant'), {})

    def _test_updateArgsNS(self, ns, before=None):
        # Helper: updateArgs merges into whatever the namespace held before.
        if before is None:
            before = {}
        update_args = {
            'Camper van Beethoven':'David Lowery',
            'Magnolia Electric Co.':'Jason Molina',
            }
        self.failUnlessEqual(self.msg.getArgs(ns), before)
        self.msg.updateArgs(ns, update_args)
        after = dict(before)
        after.update(update_args)
        self.failUnlessEqual(self.msg.getArgs(ns), after)

    def test_updateArgs(self):
        self._test_updateArgsNS(message.OPENID_NS,
                                before={'mode':'error', 'error':'unit test'})

    def test_updateArgsBARE(self):
        self._test_updateArgsNS(message.BARE_NS)

    def test_updateArgsNS1(self):
        self._test_updateArgsNS(message.OPENID1_NS,
                                before={'mode':'error', 'error':'unit test'})

    def test_updateArgsNS2(self):
        self._test_updateArgsNS(message.OPENID2_NS)

    def test_updateArgsNS3(self):
        self._test_updateArgsNS('urn:nothing-significant')

    def _test_setArgNS(self, ns):
        # Helper: set then read back a single key in the given namespace.
        key = 'Camper van Beethoven'
        value = 'David Lowery'
        self.failUnlessEqual(self.msg.getArg(ns, key), None)
        self.msg.setArg(ns, key, value)
        self.failUnlessEqual(self.msg.getArg(ns, key), value)

    def test_setArg(self):
        self._test_setArgNS(message.OPENID_NS)

    def test_setArgBARE(self):
        self._test_setArgNS(message.BARE_NS)

    def test_setArgNS1(self):
        self._test_setArgNS(message.OPENID1_NS)

    def test_setArgNS2(self):
        self._test_setArgNS(message.OPENID2_NS)

    def test_setArgNS3(self):
        self._test_setArgNS('urn:nothing-significant')

    def _test_delArgNS(self, ns):
        # Helper: delArg raises for an absent key, removes a present one.
        key = 'Camper van Beethoven'
        value = 'David Lowery'
        self.failUnlessRaises(KeyError, self.msg.delArg, ns, key)
        self.msg.setArg(ns, key, value)
        self.failUnlessEqual(self.msg.getArg(ns, key), value)
        self.msg.delArg(ns, key)
        self.failUnlessEqual(self.msg.getArg(ns, key), None)

    def test_delArg(self):
        self._test_delArgNS(message.OPENID_NS)

    def test_delArgBARE(self):
        self._test_delArgNS(message.BARE_NS)

    def test_delArgNS1(self):
        self._test_delArgNS(message.OPENID1_NS)

    def test_delArgNS2(self):
        self._test_delArgNS(message.OPENID2_NS)

    def test_delArgNS3(self):
        self._test_delArgNS('urn:nothing-significant')

    def test_isOpenID1(self):
        self.failUnless(self.msg.isOpenID1())

    def test_isOpenID2(self):
        self.failIf(self.msg.isOpenID2())
class OpenID1ExplicitMessageTest(unittest.TestCase):
    """Tests a Message built from OpenID 1 post args that explicitly carry
    openid.ns; the ns key must survive every serialization round trip."""

    def setUp(self):
        self.msg = message.Message.fromPostArgs({'openid.mode':'error',
                                                 'openid.error':'unit test',
                                                 'openid.ns':message.OPENID1_NS
                                                 })

    def test_toPostArgs(self):
        self.failUnlessEqual(self.msg.toPostArgs(),
                             {'openid.mode':'error',
                              'openid.error':'unit test',
                              'openid.ns':message.OPENID1_NS
                              })

    def test_toArgs(self):
        self.failUnlessEqual(self.msg.toArgs(), {'mode':'error',
                                                 'error':'unit test',
                                                 'ns':message.OPENID1_NS})

    def test_toKVForm(self):
        self.failUnlessEqual(self.msg.toKVForm(),
                             'error:unit test\nmode:error\nns:%s\n'
                             %message.OPENID1_NS)

    def test_toURLEncoded(self):
        self.failUnlessEqual(self.msg.toURLEncoded(),
                             'openid.error=unit+test&openid.mode=error&openid.ns=http%3A%2F%2Fopenid.net%2Fsignon%2F1.0')

    def test_toURL(self):
        base_url = 'http://base.url/'
        actual = self.msg.toURL(base_url)
        actual_base = actual[:len(base_url)]
        self.failUnlessEqual(actual_base, base_url)
        self.failUnlessEqual(actual[len(base_url)], '?')
        query = actual[len(base_url) + 1:]
        parsed = cgi.parse_qs(query)
        self.failUnlessEqual(parsed, {'openid.mode':['error'],
                                      'openid.error':['unit test'],
                                      'openid.ns':[message.OPENID1_NS]
                                      })

    def test_isOpenID1(self):
        self.failUnless(self.msg.isOpenID1())
class OpenID2MessageTest(unittest.TestCase):
def setUp(self):
self.msg = message.Message.fromPostArgs({'openid.mode':'error',
'openid.error':'unit test',
'openid.ns':message.OPENID2_NS
})
self.msg.setArg(message.BARE_NS, "xey", "value")
def test_toPostArgs(self):
self.failUnlessEqual(self.msg.toPostArgs(),
{'openid.mode':'error',
'openid.error':'unit test',
'openid.ns':message.OPENID2_NS,
'xey': 'value',
})
def test_toArgs(self):
# This method can't tolerate BARE_NS.
self.msg.delArg(message.BARE_NS, "xey")
self.failUnlessEqual(self.msg.toArgs(), {'mode':'error',
'error':'unit test',
'ns':message.OPENID2_NS,
})
def test_toKVForm(self):
# Can't tolerate BARE_NS in kvform
self.msg.delArg(message.BARE_NS, "xey")
self.failUnlessEqual(self.msg.toKVForm(),
'error:unit test\nmode:error\nns:%s\n' %
(message.OPENID2_NS,))
def _test_urlencoded(self, s):
expected = ('openid.error=unit+test&openid.mode=error&'
'openid.ns=%s&xey=value' % (
urllib.quote(message.OPENID2_NS, ''),))
self.failUnlessEqual(s, expected)
def test_toURLEncoded(self):
self._test_urlencoded(self.msg.toURLEncoded())
def test_toURL(self):
base_url = 'http://base.url/'
actual = self.msg.toURL(base_url)
actual_base = actual[:len(base_url)]
self.failUnlessEqual(actual_base, base_url)
self.failUnlessEqual(actual[len(base_url)], '?')
query = actual[len(base_url) + 1:]
self._test_urlencoded(query)
def test_getOpenID(self):
self.failUnlessEqual(self.msg.getOpenIDNamespace(), message.OPENID2_NS)
def test_getKeyOpenID(self):
self.failUnlessEqual(self.msg.getKey(message.OPENID_NS, 'mode'),
'openid.mode')
def test_getKeyBARE(self):
self.failUnlessEqual(self.msg.getKey(message.BARE_NS, 'mode'), 'mode')
def test_getKeyNS1(self):
self.failUnlessEqual(
self.msg.getKey(message.OPENID1_NS, 'mode'), None)
def test_getKeyNS2(self):
self.failUnlessEqual(
self.msg.getKey(message.OPENID2_NS, 'mode'), 'openid.mode')
def test_getKeyNS3(self):
self.failUnlessEqual(
self.msg.getKey('urn:nothing-significant', 'mode'), None)
def test_hasKeyOpenID(self):
self.failUnlessEqual(self.msg.hasKey(message.OPENID_NS, 'mode'), True)
def test_hasKeyBARE(self):
self.failUnlessEqual(self.msg.hasKey(message.BARE_NS, 'mode'), False)
def test_hasKeyNS1(self):
self.failUnlessEqual(
self.msg.hasKey(message.OPENID1_NS, 'mode'), False)
def test_hasKeyNS2(self):
self.failUnlessEqual(
self.msg.hasKey(message.OPENID2_NS, 'mode'), True)
def test_hasKeyNS3(self):
self.failUnlessEqual(
self.msg.hasKey('urn:nothing-significant', 'mode'), False)
test_getArgBARE = mkGetArgTest(message.BARE_NS, 'mode')
test_getArgNS = mkGetArgTest(message.OPENID_NS, 'mode', 'error')
test_getArgNS1 = mkGetArgTest(message.OPENID1_NS, 'mode')
test_getArgNS2 = mkGetArgTest(message.OPENID2_NS, 'mode', 'error')
test_getArgNS3 = mkGetArgTest('urn:nothing-significant', 'mode')
def test_getArgsOpenID(self):
self.failUnlessEqual(self.msg.getArgs(message.OPENID_NS),
{'mode':'error',
'error':'unit test',
})
def test_getArgsBARE(self):
self.failUnlessEqual(self.msg.getArgs(message.BARE_NS),
{'xey': 'value'})
def test_getArgsNS1(self):
self.failUnlessEqual(self.msg.getArgs(message.OPENID1_NS), {})
def test_getArgsNS2(self):
self.failUnlessEqual(self.msg.getArgs(message.OPENID2_NS),
{'mode':'error',
'error':'unit test',
})
def test_getArgsNS3(self):
self.failUnlessEqual(self.msg.getArgs('urn:nothing-significant'), {})
def _test_updateArgsNS(self, ns, before=None):
if before is None:
before = {}
update_args = {
'Camper van Beethoven':'David Lowery',
'Magnolia Electric Co.':'Jason Molina',
}
self.failUnlessEqual(self.msg.getArgs(ns), before)
self.msg.updateArgs(ns, update_args)
after = dict(before)
after.update(update_args)
self.failUnlessEqual(self.msg.getArgs(ns), after)
def test_updateArgsOpenID(self):
self._test_updateArgsNS(message.OPENID_NS,
before={'mode':'error', 'error':'unit test'})
def test_updateArgsBARE(self):
self._test_updateArgsNS(message.BARE_NS,
before={'xey':'value'})
def test_updateArgsNS1(self):
self._test_updateArgsNS(message.OPENID1_NS)
def test_updateArgsNS2(self):
self._test_updateArgsNS(message.OPENID2_NS,
before={'mode':'error', 'error':'unit test'})
def test_updateArgsNS3(self):
self._test_updateArgsNS('urn:nothing-significant')
def _test_setArgNS(self, ns):
key = 'Camper van Beethoven'
value = 'David Lowery'
self.failUnlessEqual(self.msg.getArg(ns, key), None)
self.msg.setArg(ns, key, value)
self.failUnlessEqual(self.msg.getArg(ns, key), value)
def test_setArgOpenID(self):
self._test_setArgNS(message.OPENID_NS)
def test_setArgBARE(self):
self._test_setArgNS(message.BARE_NS)
def test_setArgNS1(self):
self._test_setArgNS(message.OPENID1_NS)
def test_setArgNS2(self):
self._test_setArgNS(message.OPENID2_NS)
def test_setArgNS3(self):
self._test_setArgNS('urn:nothing-significant')
def test_badAlias(self):
"""Make sure dotted aliases and OpenID protocol fields are not
allowed as namespace aliases."""
for f in message.OPENID_PROTOCOL_FIELDS + ['dotted.alias']:
args = {'openid.ns.%s' % f: 'blah',
'openid.%s.foo' % f: 'test'}
# .fromPostArgs covers .fromPostArgs, .fromOpenIDArgs,
# ._fromOpenIDArgs, and .fromOpenIDArgs (since it calls
# .fromPostArgs).
self.failUnlessRaises(AssertionError, self.msg.fromPostArgs,
args)
def test_mysterious_missing_namespace_bug(self):
"""A failing test for bug #112"""
openid_args = {
'assoc_handle': '{{HMAC-SHA256}{1211477242.29743}{v5cadg==}',
'claimed_id': 'http://nerdbank.org/OPAffirmative/AffirmativeIdentityWithSregNoAssoc.aspx',
'ns.sreg': 'http://openid.net/extensions/sreg/1.1',
'response_nonce': '2008-05-22T17:27:22ZUoW5.\\NV',
'signed': 'return_to,identity,claimed_id,op_endpoint,response_nonce,ns.sreg,sreg.email,sreg.nickname,assoc_handle',
'sig': 'e3eGZ10+TNRZitgq5kQlk5KmTKzFaCRI8OrRoXyoFa4=',
'mode': 'check_authentication',
'op_endpoint': 'http://nerdbank.org/OPAffirmative/ProviderNoAssoc.aspx',
'sreg.nickname': 'Andy',
'return_to': 'http://localhost.localdomain:8001/process?janrain_nonce=2008-05-22T17%3A27%3A21ZnxHULd',
'invalidate_handle': '{{HMAC-SHA1}{1211477241.92242}{H0akXw==}',
'identity': 'http://nerdbank.org/OPAffirmative/AffirmativeIdentityWithSregNoAssoc.aspx',
'sreg.email': 'a@b.com'
}
m = message.Message.fromOpenIDArgs(openid_args)
self.failUnless(('http://openid.net/extensions/sreg/1.1', 'sreg') in
list(m.namespaces.iteritems()))
missing = []
for k in openid_args['signed'].split(','):
if not ("openid."+k) in m.toPostArgs().keys():
missing.append(k)
self.assertEqual([], missing, missing)
self.assertEqual(openid_args, m.toArgs())
self.failUnless(m.isOpenID1())
def test_112B(self):
args = {'openid.assoc_handle': 'fa1f5ff0-cde4-11dc-a183-3714bfd55ca8',
'openid.claimed_id': 'http://binkley.lan/user/test01',
'openid.identity': 'http://test01.binkley.lan/',
'openid.mode': 'id_res',
'openid.ns': 'http://specs.openid.net/auth/2.0',
'openid.ns.pape': 'http://specs.openid.net/extensions/pape/1.0',
'openid.op_endpoint': 'http://binkley.lan/server',
'openid.pape.auth_policies': 'none',
'openid.pape.auth_time': '2008-01-28T20:42:36Z',
'openid.pape.nist_auth_level': '0',
'openid.response_nonce': '2008-01-28T21:07:04Z99Q=',
'openid.return_to': 'http://binkley.lan:8001/process?janrain_nonce=2008-01-28T21%3A07%3A02Z0tMIKx',
'openid.sig': 'YJlWH4U6SroB1HoPkmEKx9AyGGg=',
'openid.signed': 'assoc_handle,identity,response_nonce,return_to,claimed_id,op_endpoint,pape.auth_time,ns.pape,pape.nist_auth_level,pape.auth_policies'
}
m = message.Message.fromPostArgs(args)
missing = []
for k in args['openid.signed'].split(','):
if not ("openid."+k) in m.toPostArgs().keys():
missing.append(k)
self.assertEqual([], missing, missing)
self.assertEqual(args, m.toPostArgs())
self.failUnless(m.isOpenID2())
def test_implicit_sreg_ns(self):
openid_args = {
'sreg.email': 'a@b.com'
}
m = message.Message.fromOpenIDArgs(openid_args)
self.failUnless((sreg.ns_uri, 'sreg') in
list(m.namespaces.iteritems()))
self.assertEqual('a@b.com', m.getArg(sreg.ns_uri, 'email'))
self.assertEqual(openid_args, m.toArgs())
self.failUnless(m.isOpenID1())
def _test_delArgNS(self, ns):
key = 'Camper van Beethoven'
value = 'David Lowery'
self.failUnlessRaises(KeyError, self.msg.delArg, ns, key)
self.msg.setArg(ns, key, value)
self.failUnlessEqual(self.msg.getArg(ns, key), value)
self.msg.delArg(ns, key)
self.failUnlessEqual(self.msg.getArg(ns, key), None)
# Exercise delArg for each namespace constant and for an arbitrary URI.
def test_delArgOpenID(self):
    self._test_delArgNS(message.OPENID_NS)

def test_delArgBARE(self):
    self._test_delArgNS(message.BARE_NS)

def test_delArgNS1(self):
    self._test_delArgNS(message.OPENID1_NS)

def test_delArgNS2(self):
    self._test_delArgNS(message.OPENID2_NS)

def test_delArgNS3(self):
    self._test_delArgNS('urn:nothing-significant')
def test_overwriteExtensionArg(self):
    """Setting the same extension key twice keeps the most recent value."""
    ns = 'urn:unittest_extension'
    key = 'mykey'
    # Same call sequence as before: set value_1, verify, set value_2, verify.
    for expected in ('value_1', 'value_2'):
        self.msg.setArg(ns, key, expected)
        self.failUnless(self.msg.getArg(ns, key) == expected)
def test_argList(self):
    # Argument values must be strings; a list is rejected with TypeError.
    self.failUnlessRaises(TypeError, self.msg.fromPostArgs,
                          {'arg': [1, 2, 3]})
# The fixture message (self.msg) is an OpenID 2 message, so isOpenID1 is
# False and isOpenID2 is True.
def test_isOpenID1(self):
    self.failIf(self.msg.isOpenID1())

def test_isOpenID2(self):
    self.failUnless(self.msg.isOpenID2())
class MessageTest(unittest.TestCase):
    """Tests for rendering a Message as an auto-submitting HTML POST form,
    and for OpenID namespace handling on Message objects."""

    def setUp(self):
        # A minimal, valid-looking OpenID 2 checkid_setup request.
        self.postargs = {
            'openid.ns': message.OPENID2_NS,
            'openid.mode': 'checkid_setup',
            'openid.identity': 'http://bogus.example.invalid:port/',
            'openid.assoc_handle': 'FLUB',
            'openid.return_to': 'Neverland',
        }

        self.action_url = 'scheme://host:port/path?query'

        self.form_tag_attrs = {
            'company': 'janrain',
            'class': 'fancyCSS',
        }

        self.submit_text = 'GO!'

        ### Expected data regardless of input
        self.required_form_attrs = {
            'accept-charset': 'UTF-8',
            'enctype': 'application/x-www-form-urlencoded',
            'method': 'post',
        }

    def _checkForm(self, html, message_, action_url,
                   form_tag_attrs, submit_text):
        """Assert that *html* is the expected form rendering of *message_*."""
        E = oidutil.importElementTree()

        # Build element tree from HTML source
        input_tree = E.ElementTree(E.fromstring(html))

        # Get root (form) element
        form = input_tree.getroot()

        # Check required form attributes; these may never be overridden.
        for k, v in self.required_form_attrs.iteritems():
            assert form.attrib[k] == v, \
                "Expected '%s' for required form attribute '%s', got '%s'" % \
                (v, k, form.attrib[k])

        # Check extra form attributes
        for k, v in form_tag_attrs.iteritems():
            # Skip attributes that already passed the required
            # attribute check, since they should be ignored by the
            # form generation code.
            if k in self.required_form_attrs:
                continue

            assert form.attrib[k] == v, \
                "Form attribute '%s' should be '%s', found '%s'" % \
                (k, v, form.attrib[k])

        # Check hidden fields against post args
        hiddens = [e for e in form
                   if e.tag.upper() == 'INPUT' and
                   e.attrib['type'].upper() == 'HIDDEN']

        # For each post arg, make sure there is a hidden with that
        # value. Make sure there are no other hiddens.
        for name, value in message_.toPostArgs().iteritems():
            for e in hiddens:
                if e.attrib['name'] == name:
                    assert e.attrib['value'] == value, \
                        "Expected value of hidden input '%s' to be '%s', got '%s'" % \
                        (e.attrib['name'], value, e.attrib['value'])
                    break
            else:
                self.fail("Post arg '%s' not found in form" % (name,))

        for e in hiddens:
            # BUG FIX: '%' previously applied only to the second half of a
            # '+'-concatenated string, so an assertion failure here raised
            # TypeError instead of producing this message.
            assert e.attrib['name'] in message_.toPostArgs().keys(), \
                "Form element for '%s' not in original message" % (e.attrib['name'],)

        # Check action URL
        assert form.attrib['action'] == action_url, \
            "Expected form 'action' to be '%s', got '%s'" % \
            (action_url, form.attrib['action'])

        # Check submit text
        submits = [e for e in form
                   if e.tag.upper() == 'INPUT' and
                   e.attrib['type'].upper() == 'SUBMIT']

        assert len(submits) == 1, \
            "Expected only one 'input' with type = 'submit', got %d" % \
            (len(submits),)

        assert submits[0].attrib['value'] == submit_text, \
            "Expected submit value to be '%s', got '%s'" % \
            (submit_text, submits[0].attrib['value'])

    def test_toFormMarkup(self):
        m = message.Message.fromPostArgs(self.postargs)
        html = m.toFormMarkup(self.action_url, self.form_tag_attrs,
                              self.submit_text)
        self._checkForm(html, m, self.action_url,
                        self.form_tag_attrs, self.submit_text)

    def test_overrideMethod(self):
        """Be sure that caller cannot change form method to GET."""
        m = message.Message.fromPostArgs(self.postargs)

        tag_attrs = dict(self.form_tag_attrs)
        tag_attrs['method'] = 'GET'

        # BUG FIX: the original passed self.form_tag_attrs here, so the
        # attempted 'method' override was never rendered or checked.
        html = m.toFormMarkup(self.action_url, tag_attrs,
                              self.submit_text)
        self._checkForm(html, m, self.action_url,
                        tag_attrs, self.submit_text)

    def test_overrideRequired(self):
        """Be sure that caller CANNOT change the form charset or
        encoding type."""
        m = message.Message.fromPostArgs(self.postargs)

        tag_attrs = dict(self.form_tag_attrs)
        tag_attrs['accept-charset'] = 'UCS4'
        tag_attrs['enctype'] = 'invalid/x-broken'

        html = m.toFormMarkup(self.action_url, tag_attrs,
                              self.submit_text)
        self._checkForm(html, m, self.action_url,
                        tag_attrs, self.submit_text)

    def test_setOpenIDNamespace_invalid(self):
        m = message.Message()
        invalid_things = [
            # Empty string is not okay here.
            '',
            # Good guess! But wrong.
            'http://openid.net/signon/2.0',
            # What?
            u'http://specs%\\\r2Eopenid.net/auth/2.0',
            # Too much escaping!
            'http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0',
            # This is a Type URI, not a openid.ns value.
            'http://specs.openid.net/auth/2.0/signon',
        ]

        for x in invalid_things:
            self.failUnlessRaises(message.InvalidOpenIDNamespace,
                                  m.setOpenIDNamespace, x, False)

    def test_isOpenID1(self):
        v1_namespaces = [
            # Yes, there are two of them.
            'http://openid.net/signon/1.1',
            'http://openid.net/signon/1.0',
        ]

        for ns in v1_namespaces:
            m = message.Message(ns)
            self.failUnless(m.isOpenID1(), "%r not recognized as OpenID 1" %
                            (ns,))
            self.failUnlessEqual(ns, m.getOpenIDNamespace())
            # OpenID 1 messages carry their namespace implicitly.
            self.failUnless(m.namespaces.isImplicit(ns),
                            m.namespaces.getNamespaceURI(message.NULL_NAMESPACE))

    def test_isOpenID2(self):
        ns = 'http://specs.openid.net/auth/2.0'
        m = message.Message(ns)
        self.failUnless(m.isOpenID2())
        self.failIf(m.namespaces.isImplicit(message.NULL_NAMESPACE))
        self.failUnlessEqual(ns, m.getOpenIDNamespace())

    def test_setOpenIDNamespace_explicit(self):
        m = message.Message()
        m.setOpenIDNamespace(message.THE_OTHER_OPENID1_NS, False)
        self.failIf(m.namespaces.isImplicit(message.THE_OTHER_OPENID1_NS))

    def test_setOpenIDNamespace_implicit(self):
        m = message.Message()
        m.setOpenIDNamespace(message.THE_OTHER_OPENID1_NS, True)
        self.failUnless(m.namespaces.isImplicit(message.THE_OTHER_OPENID1_NS))

    def test_explicitOpenID11NSSerialzation(self):
        m = message.Message()
        m.setOpenIDNamespace(message.THE_OTHER_OPENID1_NS, implicit=False)

        post_args = m.toPostArgs()
        self.failUnlessEqual(post_args,
                             {'openid.ns': message.THE_OTHER_OPENID1_NS})

    def test_fromPostArgs_ns11(self):
        # An example of the stuff that some Drupal installations send us,
        # which includes openid.ns but is 1.1.
        query = {
            u'openid.assoc_handle': u'',
            u'openid.claimed_id': u'http://foobar.invalid/',
            u'openid.identity': u'http://foobar.myopenid.com',
            u'openid.mode': u'checkid_setup',
            u'openid.ns': u'http://openid.net/signon/1.1',
            u'openid.ns.sreg': u'http://openid.net/extensions/sreg/1.1',
            u'openid.return_to': u'http://drupal.invalid/return_to',
            u'openid.sreg.required': u'nickname,email',
            u'openid.trust_root': u'http://drupal.invalid',
        }
        m = message.Message.fromPostArgs(query)
        self.failUnless(m.isOpenID1())
class NamespaceMapTest(unittest.TestCase):
    """Tests for message.NamespaceMap (URI <-> alias bookkeeping)."""

    def test_onealias(self):
        nsm = message.NamespaceMap()
        uri = 'http://example.com/foo'
        alias = "foo"
        nsm.addAlias(uri, alias)
        # Lookup must work in both directions.
        self.failUnless(nsm.getNamespaceURI(alias) == uri)
        self.failUnless(nsm.getAlias(uri) == alias)

    def test_iteration(self):
        nsm = message.NamespaceMap()
        uripat = 'http://example.com/foo%r'

        # Register 23 URIs; each previously-added URI must remain defined.
        nsm.add(uripat % 0)
        for n in range(1, 23):
            self.failUnless(uripat % (n - 1) in nsm)
            self.failUnless(nsm.isDefined(uripat % (n - 1)))
            nsm.add(uripat % n)

        # The URI suffix (after the 22-char prefix) must track the alias suffix.
        for (uri, alias) in nsm.iteritems():
            self.failUnless(uri[22:] == alias[3:])

        # Exhaust iterAliases manually (Python 2 iterator protocol) and
        # check that it yields exactly 23 items.
        i = 0
        it = nsm.iterAliases()
        try:
            while True:
                it.next()
                i += 1
        except StopIteration:
            self.failUnless(i == 23)

        # Same count check for iterNamespaceURIs.
        i = 0
        it = nsm.iterNamespaceURIs()
        try:
            while True:
                it.next()
                i += 1
        except StopIteration:
            self.failUnless(i == 23)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
mzdaniel/oh-mainline
|
vendor/packages/python-openid/openid/test/test_message.py
|
Python
|
agpl-3.0
| 37,611
|
def f(x):
print(x < 0, x <= 0, x > 0, x >= 0, x != 0)
print(x.foo)
print(f(<warning descr="Type 'bool' doesn't have expected attribute 'foo'">True</warning>))
print(f(<warning descr="Type 'int' doesn't have expected attribute 'foo'">0</warning>))
print(f(<warning descr="Type 'float' doesn't have expected attribute 'foo'">3.14</warning>))
|
Lekanich/intellij-community
|
python/testData/inspections/PyTypeCheckerInspection/ComparisonOperatorsForNumericTypes.py
|
Python
|
apache-2.0
| 350
|
"""
Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument controls how many "levels"
the select-related behavior will traverse.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Who remembers high school biology?
@python_2_unicode_compatible
class Domain(models.Model):
    # Root of the taxonomy FK chain exercised by the select_related tests.
    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Kingdom(models.Model):
    # One FK level below Domain.
    name = models.CharField(max_length=50)
    domain = models.ForeignKey(Domain)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Phylum(models.Model):
    # One FK level below Kingdom.
    name = models.CharField(max_length=50)
    kingdom = models.ForeignKey(Kingdom)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Klass(models.Model):
    # Spelled "Klass" because "class" is a Python keyword.
    name = models.CharField(max_length=50)
    phylum = models.ForeignKey(Phylum)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Order(models.Model):
    # One FK level below Klass.
    name = models.CharField(max_length=50)
    klass = models.ForeignKey(Klass)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Family(models.Model):
    # One FK level below Order.
    name = models.CharField(max_length=50)
    order = models.ForeignKey(Order)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Genus(models.Model):
    # One FK level below Family.
    name = models.CharField(max_length=50)
    family = models.ForeignKey(Family)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Species(models.Model):
    # Deepest level of the single-FK chain (7 hops back to Domain).
    name = models.CharField(max_length=50)
    genus = models.ForeignKey(Genus)

    def __str__(self):
        return self.name
# and we'll invent a new thing so we have a model with two foreign keys
@python_2_unicode_compatible
class HybridSpecies(models.Model):
    # Two FKs to the same model; distinct related_names avoid a reverse clash.
    name = models.CharField(max_length=50)
    parent_1 = models.ForeignKey(Species, related_name='child_1')
    parent_2 = models.ForeignKey(Species, related_name='child_2')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Topping(models.Model):
    # Target of Pizza's many-to-many relation.
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Pizza(models.Model):
    # Model with an M2M field (select_related does not follow M2M).
    name = models.CharField(max_length=100)
    toppings = models.ManyToManyField(Topping)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
    # Generic relation target: (content_type, object_id) pair plus the
    # GenericForeignKey accessor that ties them together.
    tag = models.CharField(max_length=30)
    content_type = models.ForeignKey(ContentType, related_name='select_related_tagged_items')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')

    def __str__(self):
        return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
    # Owner side of the generic relation to TaggedItem.
    url = models.URLField()
    tags = GenericRelation(TaggedItem)

    def __str__(self):
        return self.url
|
pquentin/django
|
tests/select_related/models.py
|
Python
|
bsd-3-clause
| 3,320
|
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
    """Template node that renders (or stores in a variable) the value of a
    URL-prefix setting such as STATIC_URL or MEDIA_URL."""

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.

        Accepts either ``{% tag %}`` or ``{% tag as varname %}``.
        """
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # BUG FIX: '{% tag as %}' with no variable name used to raise
            # IndexError; report a proper template syntax error instead.
            if len(tokens) < 3:
                raise template.TemplateSyntaxError(
                    "'%s as' must be followed by a variable name" % tokens[0])
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        """Return the IRI-encoded value of the named setting, or '' when
        settings are unavailable or the setting is unset."""
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        # 'as varname' form: store the prefix and render nothing.
        context[self.varname] = prefix
        return ''
@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.

    Usage::

        {% get_static_prefix [as varname] %}

    Examples::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}
    """
    # Parsing and rendering are shared with get_media_prefix via PrefixNode.
    return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.

    Usage::

        {% get_media_prefix [as varname] %}

    Examples::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}
    """
    # Parsing and rendering are shared with get_static_prefix via PrefixNode.
    return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(Node):
    """Template node that joins a (possibly variable) path with STATIC_URL,
    rendering the result or storing it in a context variable."""

    def __init__(self, varname=None, path=None):
        if path is None:
            raise template.TemplateSyntaxError(
                "Static template nodes must be given a path to return.")
        self.path = path
        self.varname = varname

    def url(self, context):
        # self.path is a FilterExpression; resolve it against the context.
        path = self.path.resolve(context)
        return self.handle_simple(path)

    def render(self, context):
        url = self.url(context)
        if self.varname is None:
            return url
        # 'as varname' form: store the URL and render nothing.
        context[self.varname] = url
        return ''

    @classmethod
    def handle_simple(cls, path):
        """Join *path* onto the STATIC_URL prefix."""
        return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)

    @classmethod
    def handle_token(cls, parser, token):
        """
        Class method to parse prefix node and return a Node.

        Accepts ``{% static path %}`` or ``{% static path as varname %}``.
        """
        bits = token.split_contents()

        if len(bits) < 2:
            raise template.TemplateSyntaxError(
                "'%s' takes at least one argument (path to file)" % bits[0])

        path = parser.compile_filter(bits[1])

        if len(bits) >= 2 and bits[-2] == 'as':
            # BUG FIX: the variable name is the token *after* 'as' (the last
            # one), not always bits[3]; identical for the common 4-token form.
            varname = bits[-1]
        else:
            varname = None

        return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
    """
    Joins the given path with the STATIC_URL setting.

    Usage::

        {% static path [as varname] %}

    Examples::

        {% static "myapp/css/base.css" %}
        {% static variable_with_path %}
        {% static "myapp/css/base.css" as admin_base_css %}
        {% static variable_with_path as varname %}
    """
    # All parsing logic lives on StaticNode so it can be reused.
    return StaticNode.handle_token(parser, token)
def static(path):
    """Python-level helper: prefix *path* with STATIC_URL (same as the tag)."""
    return StaticNode.handle_simple(path)
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/templatetags/static.py
|
Python
|
bsd-3-clause
| 4,022
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.

    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.

    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.

    Parameters
    ----------
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.

    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.

    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class

    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier

    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.

    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """

    def __init__(self, metric='euclidean', shrink_threshold=None):
        self.metric = metric
        self.shrink_threshold = shrink_threshold

    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format is easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        # Encode labels as contiguous integers 0..n_classes-1.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')

        # Mask mapping each class to it's members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of clusters in each class.
        nk = np.zeros(n_classes)

        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices index faster with integer arrays than masks.
                center_mask = np.where(center_mask)[0]

            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)

        if self.shrink_threshold:
            # Shrunken-centroid adjustment (Tibshirani et al. 2002, see class
            # docstring references).
            dataset_centroid_ = np.mean(X, axis=0)

            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]

        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')

        X = check_array(X, accept_sparse='csr')
        # Pick, for each sample, the class whose centroid is nearest under
        # the configured metric.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
WangWenjun559/Weiss
|
summary/sumy/sklearn/neighbors/nearest_centroid.py
|
Python
|
apache-2.0
| 7,249
|
import sys
import os

# Make the repository root importable so the tests can import the package
# under test without installing it first.
sys.path.insert(0, os.path.abspath('./'))
|
aventurella/figurine
|
tests/context.py
|
Python
|
isc
| 62
|
from __future__ import (print_function)
def foo():
    """Plugin-provided function; identifies this provider as version 2."""
    provided_value = "foo2"
    return provided_value
|
Ryex/pyitect
|
tests/plugins/provide_pluginv2/provide.py
|
Python
|
isc
| 70
|
from oscar.apps.basket import apps
class BasketConfig(apps.BasketConfig):
    # Sandbox override of oscar's basket AppConfig, pointing at the local
    # "basket" package instead of oscar's.
    name = "basket"
|
thelabnyc/django-oscar-wfrs
|
sandbox/basket/apps.py
|
Python
|
isc
| 96
|
# -*- coding: utf-8 -*-
import yaml
class AnsibleHandler(object):
    """
    Messier utility class for managing Ansible information.

    All methods are accessible via the Messier object. Call them from Messier.
    """

    def __init__(self):
        # No state yet; the class currently only serves as an attachment
        # point for future Ansible-related methods.
        pass
|
conorsch/messier
|
messier/ansible_handler.py
|
Python
|
isc
| 262
|
import re
from .base import Parser
class SshdLoginParser(Parser):
    """Parse sshd "Accepted ..." log entries into a human-readable summary."""

    # Compiled once at class-creation time instead of on every parse() call.
    # Matches e.g. "Accepted publickey for bob from 10.0.0.1 port 22 ssh2".
    _SSH_RE = re.compile(
        r'^Accepted (?P<method>[\w\-/]+) for (?P<user>\w+) from '
        r'(?P<ip>[0-9a-f:\.]+) port (?P<port>\d+) (?P<protocol>\w+)')

    def parse(self, entry):
        """Return a summary string for an sshd login entry, or None for
        entries this parser does not handle."""
        if entry.tag[0:4] == "sshd" and entry.content[0:8] == "Accepted":
            m = self._SSH_RE.match(entry.content)
            if m is None:
                # BUG FIX: content starting with "Accepted" but not matching
                # the expected layout previously raised AttributeError on
                # m.groupdict(); treat it as unhandled instead.
                return None
            attrs = m.groupdict()
            return "{hostname}: SSH login for {user} from {ip} using "\
                "{method}".format(hostname=entry.hostname, **attrs)
|
mutantmonkey/notilog
|
notilog/parsers/sshd.py
|
Python
|
isc
| 574
|
#!/usr/bin/python
"""
This is mostly a Python wrapper for the shp2pgsql command line utility.
"""
import subprocess
import util
from importer_modes import *
def shape_to_pgsql(config, conn, shape_path, table, mode, srid=-1, log_file=None, batch_size=1000):
    """Stream shp2pgsql output for *shape_path* into *table* over *conn*.

    config     -- object exposing the shp2pgsql binary path (config.shp2pgsql)
    conn       -- open DB-API connection; committed on success, rolled back on error
    mode       -- bitmask of IMPORT_MODE_* flags selecting shp2pgsql behavior
    srid       -- spatial reference id passed to shp2pgsql (-1 = unspecified)
    log_file   -- file object receiving shp2pgsql's stderr, or None
    batch_size -- number of ';'-terminated statements executed per batch
    """
    # Map each import-mode flag to its shp2pgsql command-line letter.
    modeflags = {
        str(IMPORT_MODE_CREATE): "c",
        str(IMPORT_MODE_APPEND): "a",
        str(IMPORT_MODE_STRUCTURE): "p",
        str(IMPORT_MODE_DATA): "",
        str(IMPORT_MODE_SPATIAL_INDEX): ""
    }
    args = [
        config.shp2pgsql,
        # Concatenate the letters of every flag present in `mode`.
        "-%s" % ''.join([modeflags[f] for f in modeflags.keys() if int(f) & mode]),
        "-W", "latin1",    # input attribute encoding -- assumes latin1 shapefiles; TODO confirm
        "-g", "the_geom",  # geometry column name
        "-s", str(srid),
        shape_path,
        table]
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=log_file)

    cursor = conn.cursor()
    try:
        with p.stdout as stdout:
            # Read ';'-terminated SQL statements and execute them in batches.
            for commands in util.groupsgen(util.read_until(stdout, ';'), batch_size):
                command = ''.join(commands).strip()
                if len(command) > 0:
                    cursor.execute(command)
        conn.commit()
    except:
        # Bare except is deliberate here: roll back on *any* failure
        # (including KeyboardInterrupt) and then re-raise.
        conn.rollback()
        raise
    finally:
        cursor.close()
def vacuum_analyze(conn, table):
    """Run VACUUM ANALYZE on *table*.

    VACUUM cannot run inside a transaction, so autocommit (isolation
    level 0) is enabled for the duration and restored afterwards.
    """
    isolation_level = conn.isolation_level
    conn.set_isolation_level(0)
    cursor = conn.cursor()
    try:
        # NOTE(review): the table name is interpolated directly into SQL;
        # callers must not pass untrusted input here.
        cursor.execute('vacuum analyze %s;' % table)
    finally:
        cursor.close()
    conn.set_isolation_level(isolation_level)
# Script mode: import every shapefile given on the command line, then
# VACUUM ANALYZE each created table.
if __name__ == '__main__':
    import config
    import psycopg2
    import os.path
    from sys import argv

    conn = psycopg2.connect("host=%s dbname=%s user=%s password=%s" % \
        (config.db['host'], config.db['name'], config.db['user'], config.db['password']))
    for shape_file in argv[1:len(argv)]:
        # Table name = shapefile base name without extension.
        table = os.path.splitext(os.path.split(shape_file)[1])[0]
        # BUG FIX: shape_to_pgsql takes (config, conn, shape_path, table,
        # mode, ...); the config argument was missing, shifting every
        # parameter by one and dropping the mode entirely.
        shape_to_pgsql(config, conn, shape_file, table,
                       IMPORT_MODE_CREATE + IMPORT_MODE_DATA + IMPORT_MODE_SPATIAL_INDEX)
        vacuum_analyze(conn, table)
|
kartena/gsd-osmify
|
Osmify/shp2pgsql.py
|
Python
|
isc
| 2,064
|
def largest_sum(nums):
    """Return the largest sum over contiguous sublists of *nums*.

    The empty sublist counts, so the result is never negative and an
    empty input yields 0 (a variant of Kadane's algorithm).
    """
    best = 0
    running = 0
    for value in nums:
        # Either extend the current sublist or restart at `value`,
        # whichever leaves the larger running total.
        extended = running + value
        running = extended if extended > value else value
        if running > best:
            best = running
    return best
def tests():
    """Self-checks for largest_sum; prints "done." when all pass."""
    # exercise example
    assert largest_sum([1, -3, 2, 3, 10, -5, 8, -12, 6]) == 18
    # adding some zeroes
    assert largest_sum([1, 0, -3, 2, 0, 3, 10, -5, 8, -12, 6]) == 18
    # test empty list
    assert largest_sum([]) == 0
    # test non numeric input
    try:
        largest_sum([1, 2, 3, 'a'])
        # As in the previous exercise, if not stated in the requirements
        # it is better to let the exception bubble up
        assert False
    except TypeError:
        pass
    # BUG FIX: `print "done."` is Python 2-only syntax; the parenthesized
    # single-argument form prints identically on Python 2 and also runs on 3.
    print("done.")
# Run the self-tests when executed as a script.
if __name__ == '__main__':
    tests()
|
barracel/code_challenges
|
max_subset_sum/max_subset_sum.py
|
Python
|
isc
| 1,151
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the voitools package
# Copyright 2015 Board of Regents of the University of Wisconsin System
# Licensed under the MIT license; see LICENSE at the root of the package.
# Authored by Nate Vack <njvack@wisc.edu> at the Waisman Laboratory for Brain
# Imaging and Behavior.
"""Convert a spamalize .voi file into a set of nifti files.
This program will produce one .nii file per VOI in a VOI group (.voi) file;
the name will be determined by the --pattern parameter.
Usage:
voi2nii [options] <datafile>
voi2nii -h | --help
Options:
--pattern=<pat> How to name the output. Surround fields in {}.
Available fields:
base_name The name of the file the VOI group is
based on
cur_name The current name of the VOI group file
voi_number The index of this VOI
voxel_count The number of voxels in the VOI
voi_name The name of this VOI
[default: {cur_name}-{voi_name}-{voi_number}.nii]
--voi-numbers=<nums> The indexes (starting from 1) of the VOIs to convert.
Separate with commas. If not specified, converts all
VOIs.
--affine-parent=<parent_file>
Read orientation, origin, and voxel size from this
file.
If not specified, reads voxel size from the .voi file
and assumes a centered origin in RPI orientation.
--out-dir=<dir> Directory to write the output files [default: .]
-h --help Show this screen
--version Show version
-v --verbose Display debugging information
"""
import sys
import logging
import re
import os
import nibabel as nib
import voitools
from voitools import voi
from voitools.vendor import docopt
logger = voi.logger

# Placeholder field names a --pattern template may reference (see module docstring).
PATTERN_SUBS = set(
    ['base_name', 'cur_name', 'voi_number', 'voxel_count', 'voi_name'])
# Any run of non-word characters; used to sanitize field values for filenames.
WORD_CHAR_RE = re.compile(r"\W+")
def main(argv):
    """Entry point: parse CLI arguments, then convert the selected VOIs."""
    arguments = docopt.docopt(
        __doc__,
        argv,
        version="voitools {0}".format(voitools.__version__))
    logger.setLevel(logging.INFO)
    if arguments['--verbose']:
        logger.setLevel(logging.DEBUG)
    logger.debug(arguments)
    voi_group = voitools.voi.read_file(arguments['<datafile>'])
    # Convert the 1-based CLI index string (or None for "all") to 0-based indexes.
    voi_indexes = make_voi_numbers(voi_group, arguments['--voi-numbers'])
    # May be None when no --affine-parent is given; the group then uses its
    # own defaults (see make_nifti).
    voi_group.set_affine(make_affine(arguments['--affine-parent']))
    process_vois(
        voi_group,
        arguments['--pattern'],
        voi_indexes,
        arguments['--out-dir'])
def make_voi_numbers(voi_group, voi_numbers_string):
    """Map a comma-separated, 1-based index string to 0-based indexes.

    When no string is given, select every VOI in *voi_group*.
    """
    if voi_numbers_string is not None:
        tokens = voi_numbers_string.split(",")
        return [int(tok) - 1 for tok in tokens]
    return range(voi_group.voi_count)
def make_affine(affine_parent_name):
    """Return the affine of the named image file, or None when no file given."""
    if affine_parent_name is None:
        return None
    return nib.load(affine_parent_name).get_affine()
def make_filename(pattern, voi):
    """Expand the {field} placeholders in *pattern* from *voi*'s attributes.

    Field values are sanitized: runs of non-word characters become '_'.
    """
    result = pattern
    for field in PATTERN_SUBS:
        raw_value = getattr(voi, "{0}".format(field))
        sanitized = re.sub(WORD_CHAR_RE, "_", str(raw_value))
        placeholder = "{{{0}}}".format(field)
        result = result.replace(placeholder, str(sanitized))
    return result
def process_vois(voi_group, name_pattern, voi_indexes, out_dir):
    """Write one NIfTI file per selected VOI into *out_dir*."""
    vois = [voi_group.vois[i] for i in voi_indexes]
    for cur_voi in vois:
        logger.debug("Converting VOI {0}".format(cur_voi.voi_number))
        nii = make_nifti(cur_voi)
        # Output name comes from expanding the --pattern template.
        out_filename = os.path.join(
            out_dir, make_filename(name_pattern, cur_voi))
        nii.to_filename(out_filename)
def make_nifti(cur_voi):
    """Build a Nifti1Image from a VOI's voxel volume and affine."""
    # It's OK if this is None, we'll just choose a centered affine.
    logger.debug("Making nifti for {0}".format(cur_voi.voi_number))
    img = nib.Nifti1Image(cur_voi.to_volume(), cur_voi.affine)
    header = img.get_header()
    # NOTE(review): qform/sform code 1 appears intended to mark the affine as
    # valid/scanner-based -- confirm against the NIfTI-1 spec if orientation
    # matters downstream.
    header['qform_code'] = 1
    header['sform_code'] = 1
    img.update_header()
    return img
def console():
    """Console entry point: forward the process's CLI arguments to main()."""
    main(sys.argv[1:])
# Allow direct execution as well as the installed console script.
if __name__ == "__main__":
    console()
|
njvack/voitools
|
voitools/scripts/voi2nii.py
|
Python
|
mit
| 4,196
|
#!/usr/bin/env python2
__author__ = "Ryon Sherman"
__email__ = "ryon.sherman@gmail.com"
__copyright__ = "Copyright 2014, Ryon Sherman"
__license__ = "MIT"
import socket
import argparse
import asyncore
from nmb.log import logger
from nmb.parser import parser
from nmb.sockets import SocketDispatcher
class Client(SocketDispatcher):
    # Per-connection handler; behavior beyond the base SocketDispatcher is
    # not implemented yet (see the TODO in Dispatcher.handle_accept).
    pass
class Dispatcher(asyncore.dispatcher):
    """Listening-socket dispatcher: binds, listens, and wraps each accepted
    connection in a Client."""

    def __init__(self, sock):
        # initialize the underlying asyncore dispatcher
        asyncore.dispatcher.__init__(self)
        # create a TCP/IPv4 socket
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # allow rebinding the address right after a restart
        self.set_reuse_addr()
        # sock is an (address, port) pair (see Server.__init__); keep a
        # printable "address:port" form for logging
        self.sock = sock
        self.address = ':'.join(map(str, sock))

    def start(self):
        """Bind to the configured address and start listening."""
        logger.debug("Starting dispatcher...")
        # bind to socket
        self.bind(self.sock)
        # listen on socket with a backlog of 5 pending connections
        self.listen(5)
        logger.debug("Dispatcher started.")

    def stop(self):
        """Close the listening socket."""
        logger.debug("Stopping dispatcher...")
        # close socket
        self.close()
        logger.debug("Dispatcher stopped.")

    def handle_accept(self):
        """asyncore callback: wrap a newly accepted connection in a Client."""
        # accept connection; may yield None for a spurious readiness event
        connection = self.accept()
        if connection is None:
            return
        # initialize client
        client = Client(connection)
        # log connection
        logger.info("Client [%s] connecting..." % client.address)
        # TODO: handle client transaction
        client.write('HELO')
class Server(object):
    """Owns the listening Dispatcher and drives the asyncore event loop."""

    # default port
    port = 55555
    # default address (bind on all interfaces)
    address = '0.0.0.0'

    def __init__(self, **kwargs):
        # assign server properties, falling back to the class-level defaults
        self.port = int(kwargs.get('port', self.port))
        self.address = kwargs.get('address', self.address)
        # initialize remote dispatcher
        self.remote = Dispatcher((self.address, self.port))

    def start(self):
        """Start listening and block in the asyncore loop; stop on exit."""
        logger.info("Starting server...")
        # start remote dispatcher
        self.remote.start()
        logger.info("Server started. Awaiting connections at [%s:%s]..." %
                    (self.address, self.port))
        # start server loop (blocks here until the loop exits)
        asyncore.loop()
        # stop server once the loop returns
        self.stop()

    def stop(self):
        """Close the listening dispatcher."""
        logger.info("Stopping server...")
        # stop remote dispatcher
        self.remote.stop()
        logger.info("Server stopped.")
def main():
    """CLI entry point: build the argument parser, configure logging, and
    run the Server until interrupted."""
    # define 'dir' argument
    parser.add_argument('--dir', default='./',
                        type=parser.path_exists,
                        help="Working directory (default: %(default)s)")
    # define 'address' argument
    parser.add_argument('--address', default=Server.address,
                        help="Bound interface address (default: %(default)s)")
    # define 'port' argument
    parser.add_argument('--port', type=int, default=Server.port,
                        help="Server port (default: %(default)s)")
    # define 'log' argument
    parser.add_argument('--log',
                        default='./nmb-server.log',
                        help="Log path (default: %(default)s)")
    # define 'log_level' argument
    parser.add_argument('--log_level', default='INFO',
                        choices=['ERROR', 'INFO', 'DEBUG'],
                        help="Console log level (default: %(default)s)")
    # define 'silent' argument
    parser.add_argument('--silent', default=False,
                        action='store_true',
                        help="Disable console log output")
    # parse arguments
    args = parser.parse_args()
    # attach the console log handler unless --silent was given
    if not args.silent:
        logger.setConsoleLogHandler(args.log_level)
    # file logging is always enabled
    logger.setFileLogHandler(args.log)
    # initialize server
    server = Server(
        port=args.port,
        address=args.address
    )
    try:
        # run server loop (blocks until the asyncore loop exits)
        server.start()
    except (KeyboardInterrupt, SystemExit):
        # interrupt server loop and shut down cleanly
        server.stop()
# Run the server when executed as a script.
if __name__ == '__main__':
    main()
|
ryonsherman/network-message-bus
|
redux/src/nmb/server.py
|
Python
|
mit
| 4,217
|
"""
Copyright (C) 2014, Jill Huchital
"""
""" Clean _id out of objects returned from the DB, so that they'll JSONify nicely """
def clean_db_object(obj):
    """Remove the DB-internal '_id' key from *obj* (in place) and return it,
    so the object JSONifies cleanly."""
    obj.pop("_id", None)
    return obj
|
jillh510/video_story_arcs
|
utils.py
|
Python
|
mit
| 221
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_required
from tekton import router
from categoria import model
from categoria import validation
from tekton.gae.middleware.redirect import RedirectResponse
@no_csrf
@login_required
def index():
    """Render the category creation form (action label: 'adicionar')."""
    ctx = {
        'salvar_path': router.to_path(salvar),
        'categoria': '',
        'acao': 'adicionar',
    }
    return TemplateResponse(ctx, template_path='categorias/cadastro.html')
@login_required
def salvar(_resp, **propriedades):
    """Validate submitted category data; persist it or re-render the form.

    On validation errors the creation form is rendered again with the error
    messages; on success a new Categoria entity is stored and the client is
    redirected to the listing page.
    """
    form = validation.CategoriaForm(**propriedades)
    erros = form.validate()
    if erros:
        # Validation failed: show the form again with the errors.
        ctx = {'salvar_path': router.to_path(salvar), 'erros': erros, 'categoria': form}
        return TemplateResponse(ctx, template_path='categorias/cadastro.html')
    # Validation passed: persist and redirect.
    categoria = model.Categoria(nome=propriedades['nome'], categoria_pai=propriedades['categoria_pai'])
    categoria.put()
    return RedirectResponse('/categorias')
|
iwilliam317/tekton
|
backend/appengine/routes/categorias/cadastro.py
|
Python
|
mit
| 1,193
|
#!/usr/bin/env python
# visualize-examples.py
"""Render lattice diagrams for the bundled example contexts as PDF files."""
import concepts.visualize

# Shared rendering options for every render_all call below.
ARGS = dict(directory='visualize-output', out_format='pdf')

concepts.visualize.render_all('examples/*.cxt', **ARGS)
concepts.visualize.render_all('examples/[r-v]*.csv', encoding='utf-8', **ARGS)
|
pombredanne/concepts
|
visualize-examples.py
|
Python
|
mit
| 272
|
# -*- coding: utf-8 -*-
import pandas as pd
import re
import jieba
import numpy as np
import copy
# ###################### initial the parameter ####################### #
# Passages longer than this many characters are truncated before tokenizing.
short_text = 600
# Stop-word list: loaded from a spreadsheet, then extended with common
# Chinese function words and punctuation.
# NOTE(review): pd.read_excel runs at import time and requires "useless_1.xls"
# in the working directory -- confirm deployment layout.
useless_word = list(pd.read_excel("useless_1.xls",header=None)[0])
useless_word += [',',':','‘','’','','。','—','——',
                 '你','我','他','它','咱们','大家','自己',
                 '这','那','这儿','那边','各','每','的','了',
                 '谁','什么','哪','怎么','哪里','几','地']
##########################################################################
# the function to get the pure word ; Using the Regular Expression
def remove_mess(text):
    """Strip Latin letters, ASCII punctuation and whitespace from *text*.

    Keeps CJK characters, digits and CJK punctuation (e.g. '。' is NOT in
    the character class and survives).  The original parameter was named
    ``str``, shadowing the builtin -- renamed to ``text``.
    """
    mess = re.compile('[a-zA-Z\"\'’\/\(\),:;~‘\(\)\|\-\*@#$%^&\[\]{}<>+`\s\n\r\\\\]')
    return mess.sub('', text)
# the function to cut the word ; Using the "jieba" module
def cut_the_word(str_in):
    """Tokenize *str_in* with jieba and drop stop words.

    Passages longer than the module-level ``short_text`` threshold are
    shortened to their first and last 20% before segmentation.
    """
    n = len(str_in)
    if n > short_text:
        boundary = int(0.2 * n)
        str_in = str_in[:boundary] + str_in[-boundary:]
    tokens = jieba.cut(str_in, cut_all=False)
    return [tok for tok in tokens if tok not in useless_word]
# the function to get word sequence
# Read from "data_sheet.csv", then assign each surviving word a number
def word_seq(x, maxlength):
    """Convert each passage in *x* into a fixed-length sequence of word ids.

    x: assumed to be a DataFrame-like mapping with a 'passage' column of
       strings -- TODO confirm against caller.
    maxlength: sequences shorter than this are right-padded with zeros.
    NOTE(review): sequences LONGER than maxlength are not truncated here --
    confirm that downstream consumers handle over-length input.
    """
    # Tokenize every passage with the module-level jieba helper.
    store = list(map(lambda s: cut_the_word(s), x['passage']))
    # Word-frequency table: index = word, column 1 = count.
    tmp = pd.read_csv('data_sheet.csv', header=None, index_col=0)
    # Keep only words that occur more than 11 times.
    select = tmp[tmp[1] > 11]
    count = copy.deepcopy(select[1])
    # Re-number the surviving words 1..N (0 is reserved for padding).
    count[:] = range(1, 1 + len(count))
    def doc2num(a, maxlength):
        # 'in' on a pandas Series tests INDEX membership, i.e. vocabulary lookup.
        a = [i for i in a if i in count]
        a = list(count[a])
        # Right-pad with zeros up to maxlength (no truncation).
        a += max(maxlength-len(a), 0)*[0]
        return a
    x['seq'] = list(map(lambda s: doc2num(s, maxlength), store))
    return list(x['seq'])
|
yaoqr15/Machine-Learning-Based-Rumors-Detection
|
Keras_main/pre_process.py
|
Python
|
mit
| 1,911
|
import inspect
import os
import types
from contextlib import contextmanager
from . import discovery
from . import errors
from .plugin_interface import PluginInterface, TEST_FOLDER, CONTEXT, EXAMPLES, SETUP, ACTION, ASSERTION, TEARDOWN, NO_EXAMPLE
class TestRun(object):
    """Top-level runner: executes tests from a class, module, file or folder.

    ``source`` may be a class object, an imported module, a path to a .py
    file, or a directory -- dispatching happens in run()/import_modules().
    """
    def __init__(self, source, plugin_composite):
        self.source = source
        self.plugin_composite = plugin_composite
        self.exception_handler = ExceptionHandler(self.plugin_composite)
    def run(self):
        """Run everything found in self.source, reporting via the plugins."""
        with self.exception_handler.run_test_run(self):
            if isinstance(self.source, type):
                # A single class was supplied directly.
                test_class = TestClass(self.source, self.plugin_composite)
                test_class.run()
            else:
                modules = self.import_modules()
                self.plugin_composite.process_module_list(modules)
                for module in modules:
                    suite = Suite(module, self.plugin_composite)
                    suite.run()
    def import_modules(self):
        """Resolve self.source into a list of imported modules.

        NOTE(review): falls off the end (returns None) when source is
        neither a module, an existing file, nor a directory -- run() would
        then raise inside process_module_list; confirm intended.
        """
        if isinstance(self.source, types.ModuleType):
            return [self.source]
        if os.path.isfile(self.source):
            folder, filename = os.path.split(self.source)
            importer = discovery.create_importer(folder, self.plugin_composite, self.exception_handler)
            module = importer.import_file(filename)
            if module is None:
                return []
            return [module]
        if os.path.isdir(self.source):
            return self.import_modules_from_folder(self.source)
    def import_modules_from_folder(self, directory):
        """Walk *directory*, importing every module the plugins accept."""
        module_list = discovery.ModuleList(self.plugin_composite, self.exception_handler)
        for folder, dirnames, _ in os.walk(directory):
            # Pruning dirnames in place stops os.walk descending into them.
            self.remove_non_test_folders(folder, dirnames)
            importer = discovery.create_importer(folder, self.plugin_composite, self.exception_handler)
            # NOTE: the inner loop rebinds 'folder' to the importer's spec folder.
            for folder, filename in importer.module_specs():
                module_list.add(folder, filename)
        return [m for m in module_list.modules if m is not None]
    def remove_non_test_folders(self, parent, dirnames):
        """Keep only subfolders the plugins identify as TEST_FOLDER (in place)."""
        replacement = []
        for dirname in dirnames:
            full_path = os.path.realpath(os.path.join(parent, dirname))
            reply = self.plugin_composite.identify_folder(full_path)
            if reply is TEST_FOLDER:
                replacement.append(dirname)
        # Slice-assignment mutates the caller's list, as os.walk requires.
        dirnames[:] = replacement
class Suite(object):
    """Runs every test class the plugins identify inside a single module."""
    def __init__(self, module, plugin_composite):
        self.module = module
        self.name = self.module.__name__
        self.plugin_composite = plugin_composite
        self.exception_handler = ExceptionHandler(self.plugin_composite)
        # Collect the classes up front so plugins can inspect/reorder the list.
        self.classes = self.get_classes()
        self.plugin_composite.process_class_list(self.module, self.classes)
    def run(self):
        """Execute each identified class, reporting suite start/end."""
        with self.exception_handler.run_suite(self):
            for candidate in self.classes:
                TestClass(candidate, self.plugin_composite).run()
    def get_classes(self):
        """Return the module's classes that plugins identify as contexts."""
        members = inspect.getmembers(self.module, inspect.isclass)
        return [cls for _, cls in members
                if self.plugin_composite.identify_class(cls) is CONTEXT]
class TestClass(object):
    """Collects a context class's special methods and runs it per example.

    Walks the MRO gathering setup/action/assertion/teardown/examples methods
    as identified by the plugins, then runs one Context per example.
    """
    def __init__(self, cls, plugin_composite):
        self.cls = cls
        self.plugin_composite = plugin_composite
        self.exception_handler = ExceptionHandler(self.plugin_composite)
        self.examples_method = None
        self.unbound_setups = []
        self.unbound_action = None
        self.unbound_assertions = []
        self.unbound_teardowns = []
        # Iterate base-first so setups collect in base->derived order;
        # bottom_of_tree marks the concrete class itself.
        for superclass in reversed(inspect.getmro(cls)):
            bottom_of_tree = (superclass is cls)
            self.load_special_methods_from_class(superclass, bottom_of_tree)
        # Teardowns were collected base-first; reverse so the most-derived
        # class's teardown runs first.
        self.unbound_teardowns.reverse()
        # Default examples/action are no-ops when none were found.
        if self.examples_method is None:
            self.examples_method = lambda: None
        if self.unbound_action is None:
            self.unbound_action = lambda self: None
    def run(self):
        """Run one Context per example; classes without assertions are skipped."""
        if not self.unbound_assertions:
            return
        with self.exception_handler.run_class(self):
            for example in self.get_examples():
                context = Context(
                    self.cls(), example,
                    self.unbound_setups,
                    self.unbound_action,
                    self.unbound_assertions,
                    self.unbound_teardowns,
                    self.plugin_composite
                )
                context.run()
    def get_examples(self):
        """Return the example list, defaulting to a single NO_EXAMPLE marker."""
        examples = self.examples_method()
        return examples if examples is not None else [NO_EXAMPLE]
    def load_special_methods_from_class(self, cls, bottom_of_tree):
        # there should be one of each of these per class,
        # but there may be more than one in the inheritance tree
        class_setup = None
        class_teardown = None
        for name in cls.__dict__:
            val = getattr(cls, name)
            if callable(val) and not isprivate(name):
                response = self.plugin_composite.identify_method(val)
                # Examples, action and assertions only count on the concrete
                # class; setups and teardowns accumulate through the MRO.
                if response is EXAMPLES and bottom_of_tree:
                    assert_not_too_many_special_methods(self.examples_method, cls, val)
                    self.examples_method = val
                elif response is SETUP:
                    assert_not_too_many_special_methods(class_setup, cls, val)
                    class_setup = val
                    self.unbound_setups.append(val)
                elif response is ACTION and bottom_of_tree:
                    assert_not_too_many_special_methods(self.unbound_action, cls, val)
                    self.unbound_action = val
                elif response is ASSERTION and bottom_of_tree:
                    self.unbound_assertions.append(val)
                elif response is TEARDOWN:
                    assert_not_too_many_special_methods(class_teardown, cls, val)
                    class_teardown = val
                    self.unbound_teardowns.append(val)
def isprivate(name):
    """Return True when *name* uses the leading-underscore private convention."""
    return name[:1] == '_'
def assert_not_too_many_special_methods(previously_found, cls, just_found):
    """Raise TooManySpecialMethodsError when a class defines a duplicate special method."""
    if previously_found is None:
        return
    message = "Context {} has multiple methods of the same type:\n".format(cls.__qualname__)
    message += previously_found.__name__ + ", " + just_found.__name__
    raise errors.TooManySpecialMethodsError(message)
# This class and its relationship to TestClass is becoming awkward - figure out how best to split them up
class Context(object):
    """One execution of a test-class instance against a single example."""
    def __init__(self, instance, example,
                 unbound_setups, unbound_action, unbound_assertions, unbound_teardowns,
                 plugin_composite):
        self.plugin_composite = plugin_composite
        self.exception_handler = ExceptionHandler(self.plugin_composite)
        self.instance = instance
        self.example = example
        self.name = instance.__class__.__name__
        # Let plugins inspect/reorder the assertion list before binding.
        self.plugin_composite.process_assertion_list(self.instance.__class__, unbound_assertions)
        # Bind every collected function to this particular instance.
        self.setups = bind_methods(unbound_setups, self.instance)
        self.action = types.MethodType(unbound_action, self.instance)
        self.assertions = [Assertion(method, self.plugin_composite)
                           for method in bind_methods(unbound_assertions, self.instance)]
        self.teardowns = bind_methods(unbound_teardowns, self.instance)
    def run(self):
        """Setup, act, assert; teardown always runs, even after a failure."""
        with self.exception_handler.run_context(self):
            try:
                self.run_setup()
                self.run_action()
                self.run_assertions()
            finally:
                self.run_teardown()
    def run_setup(self):
        for setup_method in self.setups:
            run_with_test_data(setup_method, self.example)
    def run_action(self):
        run_with_test_data(self.action, self.example)
    def run_assertions(self):
        for assertion in self.assertions:
            assertion.run(self.example)
    def run_teardown(self):
        for teardown_method in self.teardowns:
            run_with_test_data(teardown_method, self.example)
def bind_methods(funcs, instance):
    """Bind each plain function in *funcs* to *instance* as a method."""
    bound = []
    for func in funcs:
        bound.append(types.MethodType(func, instance))
    return bound
class Assertion(object):
    """Wraps one bound assertion method so failures route through the plugins."""
    def __init__(self, func, plugin_composite):
        self.func = func
        self.name = func.__name__
        self.plugin_composite = plugin_composite
        self.exception_handler = ExceptionHandler(self.plugin_composite)
    def run(self, test_data):
        """Execute the assertion; pass/fail/error reporting is delegated."""
        with self.exception_handler.run_assertion(self):
            run_with_test_data(self.func, test_data)
def run_with_test_data(func, test_data):
    """Call *func*, passing *test_data* in whatever shape its signature accepts.

    Tuples matching the parameter count are splatted; anything else is passed
    as a single argument; NO_EXAMPLE (or a zero-arg func) means call bare.
    """
    params = inspect.signature(func).parameters
    if test_data is NO_EXAMPLE or not params:
        func()
        return
    if isinstance(test_data, tuple) and len(params) == len(test_data):
        func(*test_data)
    else:
        func(test_data)
class ExceptionHandler(object):
    """Context managers that pair plugin start/end callbacks around each phase.

    Each manager reports the phase to the plugins and converts exceptions
    into the appropriate plugin notification instead of letting them crash
    the whole run (with the exceptions noted below).
    """
    def __init__(self, plugin_composite):
        self.plugin_composite = plugin_composite
    @contextmanager
    def run_test_run(self, test_run):
        # Any Exception is swallowed and reported; test_run_ended fires on
        # both paths (but not for BaseExceptions like KeyboardInterrupt).
        self.plugin_composite.test_run_started()
        try:
            yield
        except Exception as e:
            self.plugin_composite.unexpected_error(e)
        self.plugin_composite.test_run_ended()
    @contextmanager
    def importing(self, folder, filename):
        # NOTE(review): folder/filename are unused here -- presumably kept
        # for interface symmetry with callers; confirm.
        try:
            yield
        except Exception as e:
            self.plugin_composite.unexpected_error(e)
    @contextmanager
    def run_suite(self, suite):
        # NOTE(review): no try/except here -- an exception inside a suite
        # propagates to run_test_run's handler and suite_ended is skipped.
        self.plugin_composite.suite_started(suite.module)
        yield
        self.plugin_composite.suite_ended(suite.module)
    @contextmanager
    def run_class(self, test_class):
        # Errored and ended are mutually exclusive (except vs else).
        self.plugin_composite.test_class_started(test_class.cls)
        try:
            yield
        except Exception as e:
            self.plugin_composite.test_class_errored(test_class.cls, e)
        else:
            self.plugin_composite.test_class_ended(test_class.cls)
    @contextmanager
    def run_context(self, context):
        self.plugin_composite.context_started(context.instance.__class__, context.example)
        try:
            yield
        except Exception as e:
            self.plugin_composite.context_errored(context.instance.__class__, context.example, e)
        else:
            self.plugin_composite.context_ended(context.instance.__class__, context.example)
    @contextmanager
    def run_assertion(self, assertion):
        # AssertionError -> failed; any other Exception -> errored.
        self.plugin_composite.assertion_started(assertion.func)
        try:
            yield
        except AssertionError as e:
            self.plugin_composite.assertion_failed(assertion.func, e)
        except Exception as e:
            self.plugin_composite.assertion_errored(assertion.func, e)
        else:
            self.plugin_composite.assertion_passed(assertion.func)
class PluginComposite(object):
    """Fans any PluginInterface call out to every plugin, first reply wins."""
    def __init__(self, plugins):
        self.plugins = plugins
    def __getattr__(self, name):
        # Only names declared on PluginInterface may be proxied.
        if name not in PluginInterface.__dict__:  # not expecting this to happen
            raise AttributeError('The method {} is not part of the plugin interface'.format(name))
        def broadcast(*args, **kwargs):
            # Ask each plugin in order; the first non-None reply short-circuits.
            for plugin in self.plugins:
                handler = getattr(plugin, name, lambda *_: None)
                result = handler(*args, **kwargs)
                if result is not None:
                    return result
        return broadcast
|
benjamin-hodgson/Contexts
|
src/contexts/core.py
|
Python
|
mit
| 11,664
|
# gdiplusflat.py created from gdiplusflat.h
# Copyright (c) 2012 Maxim Kolosov
# GDI+ version targeted by these bindings (1.0); gates version-specific enums.
GDIPVER = 0x0100
# enum Status -- return codes of the flat GDI+ API (0 == success).
Status = 0
Ok = 0
GenericError = 1
InvalidParameter = 2
OutOfMemory = 3
ObjectBusy = 4
InsufficientBuffer = 5
# NOTE(review): shadows the builtin 'NotImplemented'; renaming would break
# the mirrored C enum naming, so it is kept -- beware in this module.
NotImplemented = 6
Win32Error = 7
WrongState = 8
Aborted = 9
FileNotFound = 10
ValueOverflow = 11
AccessDenied = 12
UnknownImageFormat = 13
FontFamilyNotFound = 14
FontStyleNotFound = 15
NotTrueTypeFont = 16
UnsupportedGdiplusVersion = 17
GdiplusNotInitialized = 18
PropertyNotFound = 19
PropertyNotSupported = 20
if GDIPVER >= 0x0110:
    # Only defined from GDI+ 1.1 onwards.
    ProfileNotFound = 21
# enum Unit constants
Unit = 0
UnitWorld = 0# World coordinate (non-physical unit)
UnitDisplay = 1# Variable -- for PageTransform only
UnitPixel = 2# Each unit is one device pixel.
UnitPoint = 3# Each unit is a printer's point, or 1/72 inch.
UnitInch = 4# Each unit is 1 inch.
UnitDocument = 5# Each unit is 1/300 inch.
UnitMillimeter = 6# Each unit is 1 millimeter.
# enum GdiplusStartupParams -- flags for GdiplusStartupInput.GdiplusVersion behavior.
GdiplusStartupParams = 0
GdiplusStartupDefault = 0
GdiplusStartupNoSetRound = 1
GdiplusStartupSetPSValue = 2
GdiplusStartupTransparencyMask = 0xFF000000
# Bit positions and masks of the four ARGB color channels.
AlphaShift = 24
RedShift = 16
GreenShift = 8
BlueShift = 0
AlphaMask = 0xff000000
RedMask = 0x00ff0000
GreenMask = 0x0000ff00
BlueMask = 0x000000ff

def MakeARGB(a, r, g, b):
    """Pack alpha/red/green/blue components into a 32-bit ARGB c_ulong."""
    packed = (a << AlphaShift) | (r << RedShift) | (g << GreenShift) | (b << BlueShift)
    return c_ulong(packed)
# NOTE(review): star-import in mid-file mirrors the original header layout;
# WINFUNCTYPE and windll exist only on Windows.
from ctypes import *
# Callback prototypes used by GdiplusStartupInput/Output.
DebugEventProc = WINFUNCTYPE(None, c_int, c_char_p)
NotificationHookProc = WINFUNCTYPE(c_int, c_void_p)
NotificationUnhookProc = WINFUNCTYPE(None, c_void_p)
class GdiplusStartupInput(Structure):
    """Input structure for GdiplusStartup.

    Usage: startup_input = GdiplusStartupInput(1, None, False, False)
    """
    _fields_ = [('GdiplusVersion', c_uint),
                ('DebugEventCallback', DebugEventProc),
                ('SuppressBackgroundThread', c_bool),
                ('SuppressExternalCodecs', c_bool)]
class GdiplusStartupOutput(Structure):
    """Output structure of GdiplusStartup: background-thread hook callbacks."""
    _fields_ = [('NotificationHook', NotificationHookProc),
                ('NotificationUnhook', NotificationUnhookProc)]
# The bindings below resolve against gdiplus.dll at import time (Windows only).
#extern "C" Status WINAPI GdiplusStartup(OUT ULONG_PTR *token, const GdiplusStartupInput *input, OUT GdiplusStartupOutput *output);
GdiplusStartup = WINFUNCTYPE(c_int, c_void_p, POINTER(GdiplusStartupInput), c_void_p)(('GdiplusStartup', windll.gdiplus))
#extern "C" VOID WINAPI GdiplusShutdown(ULONG_PTR token);
GdiplusShutdown = WINFUNCTYPE(None, c_void_p)(('GdiplusShutdown', windll.gdiplus))
#=========
# Brush APIs
# Wrappers with a leading-underscore binding return (status, out_value) tuples.
#GpStatus WINGDIPAPI GdipCloneBrush(GpBrush *brush, GpBrush **cloneBrush);
_GdipCloneBrush = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipCloneBrush', windll.gdiplus))
def GdipCloneBrush(brush):
    """Clone *brush*; returns (status, new brush handle)."""
    cloneBrush = c_void_p()
    status = _GdipCloneBrush(brush, byref(cloneBrush))
    return status, cloneBrush
#GpStatus WINGDIPAPI GdipDeleteBrush(GpBrush *brush);
GdipDeleteBrush = WINFUNCTYPE(c_int, c_void_p)(('GdipDeleteBrush', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetBrushType(GpBrush *brush, GpBrushType *type);
_GdipGetBrushType = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetBrushType', windll.gdiplus))
def GdipGetBrushType(brush):
    """Query the GpBrushType of *brush*; returns (status, type int)."""
    type_brush = c_int()
    status = _GdipGetBrushType(brush, byref(type_brush))
    return status, type_brush.value
#========
# SolidBrush APIs
#GpStatus WINGDIPAPI GdipCreateSolidFill(ARGB color, GpSolidFill **brush);
_GdipCreateSolidFill = WINFUNCTYPE(c_int, c_ulong, c_void_p)(('GdipCreateSolidFill', windll.gdiplus))
def GdipCreateSolidFill(color = 128):
    """Create a solid-fill brush with ARGB *color*; returns (status, brush handle)."""
    brush = c_void_p()
    status = _GdipCreateSolidFill(color, byref(brush))
    return status, brush
#GpStatus WINGDIPAPI GdipSetSolidFillColor(GpSolidFill *brush, ARGB color);
GdipSetSolidFillColor = WINFUNCTYPE(c_int, c_void_p, c_ulong)(('GdipSetSolidFillColor', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetSolidFillColor(GpSolidFill *brush, ARGB *color);
# FIX: the first parameter is a GpSolidFill* handle and was previously
# declared c_ulong, which truncates the pointer on 64-bit Python; every
# sibling binding in this file declares object handles as c_void_p.
_GdipGetSolidFillColor = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetSolidFillColor', windll.gdiplus))
def GdipGetSolidFillColor(brush):
    """Read the ARGB color of a solid-fill brush; returns (status, color int)."""
    color = c_ulong()
    status = _GdipGetSolidFillColor(brush, byref(color))
    return status, color.value
#========
# Pen APIs
#GpStatus WINGDIPAPI GdipCreatePen1(ARGB color, REAL width, GpUnit unit, GpPen **pen);
_GdipCreatePen1 = WINFUNCTYPE(c_int, c_ulong, c_float, c_int, c_void_p)(('GdipCreatePen1', windll.gdiplus))
def GdipCreatePen1(color = MakeARGB(255, 255, 255, 255), width = 1.0, unit = UnitWorld):
    """Create a pen from an ARGB color; returns (status, pen handle).

    Note: the default is evaluated once at import time (a shared c_ulong),
    which is harmless here since it is only read.
    """
    pen = c_void_p()
    status = _GdipCreatePen1(color, width, unit, byref(pen))
    return status, pen
#GpStatus WINGDIPAPI GdipCreatePen2(GpBrush *brush, REAL width, GpUnit unit, GpPen **pen);
_GdipCreatePen2 = WINFUNCTYPE(c_int, c_void_p, c_float, c_int, c_void_p)(('GdipCreatePen2', windll.gdiplus))
def GdipCreatePen2(color = None, width = 1.0, unit = UnitWorld):
    """Create a pen from a brush; returns (status, pen handle).

    NOTE(review): the parameter is named 'color' but the C API takes a
    GpBrush* -- callers must pass a brush handle; confirm before renaming.
    """
    pen = c_void_p()
    status = _GdipCreatePen2(color, width, unit, byref(pen))
    return status, pen
#GpStatus WINGDIPAPI GdipClonePen(GpPen *pen, GpPen **clonepen);
_GdipClonePen = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipClonePen', windll.gdiplus))
def GdipClonePen(pen):
    """Clone *pen*; returns (status, new pen handle)."""
    clonepen = c_void_p()
    status = _GdipClonePen(pen, byref(clonepen))
    return status, clonepen
#GpStatus WINGDIPAPI GdipDeletePen(GpPen *pen);
GdipDeletePen = WINFUNCTYPE(c_int, c_void_p)(('GdipDeletePen', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSetPenWidth(GpPen *pen, REAL width);
GdipSetPenWidth = WINFUNCTYPE(c_int, c_void_p, c_float)(('GdipSetPenWidth', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetPenWidth(GpPen *pen, REAL *width);
_GdipGetPenWidth = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetPenWidth', windll.gdiplus))
def GdipGetPenWidth(pen):
    """Return (status, pen width as float)."""
    width = c_float()
    status = _GdipGetPenWidth(pen, byref(width))
    return status, width.value
#GpStatus WINGDIPAPI GdipSetPenUnit(GpPen *pen, GpUnit unit);
GdipSetPenUnit = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetPenUnit', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetPenUnit(GpPen *pen, GpUnit *unit);
_GdipGetPenUnit = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetPenUnit', windll.gdiplus))
def GdipGetPenUnit(pen):
    """Return (status, pen unit as int -- see the Unit* constants)."""
    unit = c_int()
    status = _GdipGetPenUnit(pen, byref(unit))
    return status, unit.value
#=========
# Image APIs
#GpStatus WINGDIPAPI GdipLoadImageFromStream(IStream* stream, GpImage **image);
_GdipLoadImageFromStream = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipLoadImageFromStream', windll.gdiplus))
def GdipLoadImageFromStream(stream):
    """Load an image from an IStream; returns (status, image handle)."""
    image = c_void_p()
    status = _GdipLoadImageFromStream(stream, byref(image))
    return status, image
#GpStatus WINGDIPAPI GdipLoadImageFromFile(GDIPCONST WCHAR* filename, GpImage **image);
_GdipLoadImageFromFile = WINFUNCTYPE(c_int, c_wchar_p, c_void_p)(('GdipLoadImageFromFile', windll.gdiplus))
def GdipLoadImageFromFile(filename = ''):
    """Load an image from a file path; returns (status, image handle)."""
    image = c_void_p()
    status = _GdipLoadImageFromFile(filename, byref(image))
    return status, image
#GpStatus WINGDIPAPI GdipLoadImageFromStreamICM(IStream* stream, GpImage **image);
_GdipLoadImageFromStreamICM = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipLoadImageFromStreamICM', windll.gdiplus))
def GdipLoadImageFromStreamICM(stream):
    """ICM (color-managed) variant of GdipLoadImageFromStream."""
    image = c_void_p()
    status = _GdipLoadImageFromStreamICM(stream, byref(image))
    return status, image
#GpStatus WINGDIPAPI GdipLoadImageFromFileICM(GDIPCONST WCHAR* filename, GpImage **image);
_GdipLoadImageFromFileICM = WINFUNCTYPE(c_int, c_wchar_p, c_void_p)(('GdipLoadImageFromFileICM', windll.gdiplus))
def GdipLoadImageFromFileICM(filename = ''):
    """ICM (color-managed) variant of GdipLoadImageFromFile."""
    image = c_void_p()
    status = _GdipLoadImageFromFileICM(filename, byref(image))
    return status, image
#GpStatus WINGDIPAPI GdipCloneImage(GpImage *image, GpImage **cloneImage);
_GdipCloneImage = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipCloneImage', windll.gdiplus))
def GdipCloneImage(image):
    """Clone *image*; returns (status, new image handle)."""
    cloneImage = c_void_p()
    status = _GdipCloneImage(image, byref(cloneImage))
    return status, cloneImage
#GpStatus WINGDIPAPI GdipDisposeImage(GpImage *image);
GdipDisposeImage = WINFUNCTYPE(c_int, c_void_p)(('GdipDisposeImage', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSaveImageToFile(GpImage *image, GDIPCONST WCHAR* filename, GDIPCONST CLSID* clsidEncoder, GDIPCONST EncoderParameters* encoderParams);
# NOTE(review): filename is declared c_void_p here but c_wchar_p in the load
# functions above -- callers must pass a c_wchar_p themselves; confirm intended.
GdipSaveImageToFile = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_void_p)(('GdipSaveImageToFile', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSaveImageToStream(GpImage *image, IStream* stream, GDIPCONST CLSID* clsidEncoder, GDIPCONST EncoderParameters* encoderParams);
GdipSaveImageToStream = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_void_p)(('GdipSaveImageToStream', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSaveAdd(GpImage *image, GDIPCONST EncoderParameters* encoderParams);
GdipSaveAdd = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipSaveAdd', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSaveAddImage(GpImage *image, GpImage* newImage, GDIPCONST EncoderParameters* encoderParams);
GdipSaveAddImage = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p)(('GdipSaveAddImage', windll.gdiplus))
#===========
# Graphics APIs
# Getter wrappers below return (status, value) tuples; plain bindings are
# called directly with handles/ints.
#GpStatus WINGDIPAPI GdipFlush(GpGraphics *graphics, GpFlushIntention intention);
GdipFlush = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipFlush', windll.gdiplus))
#GpStatus WINGDIPAPI GdipCreateFromHDC(HDC hdc, GpGraphics **graphics);
_GdipCreateFromHDC = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipCreateFromHDC', windll.gdiplus))
def GdipCreateFromHDC(hdc):
    """Create a graphics object from a device context; returns (status, handle)."""
    graphics = c_void_p()
    status = _GdipCreateFromHDC(hdc, byref(graphics))
    return status, graphics
#GpStatus WINGDIPAPI GdipCreateFromHDC2(HDC hdc, HANDLE hDevice, GpGraphics **graphics);
GdipCreateFromHDC2 = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p)(('GdipCreateFromHDC2', windll.gdiplus))
#GpStatus WINGDIPAPI GdipCreateFromHWND(HWND hwnd, GpGraphics **graphics);
GdipCreateFromHWND = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipCreateFromHWND', windll.gdiplus))
#GpStatus WINGDIPAPI GdipCreateFromHWNDICM(HWND hwnd, GpGraphics **graphics);
GdipCreateFromHWNDICM = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipCreateFromHWNDICM', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDeleteGraphics(GpGraphics *graphics);
GdipDeleteGraphics = WINFUNCTYPE(c_int, c_void_p)(('GdipDeleteGraphics', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetDC(GpGraphics* graphics, HDC * hdc);
_GdipGetDC = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetDC', windll.gdiplus))
def GdipGetDC(graphics):
    """Return (status, HDC handle) for *graphics*; pair with GdipReleaseDC."""
    hdc = c_void_p()
    status = _GdipGetDC(graphics, byref(hdc))
    return status, hdc
#GpStatus WINGDIPAPI GdipReleaseDC(GpGraphics* graphics, HDC hdc);
GdipReleaseDC = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipReleaseDC', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSetCompositingMode(GpGraphics *graphics, CompositingMode compositingMode);
GdipSetCompositingMode = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetCompositingMode', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetCompositingMode(GpGraphics *graphics, CompositingMode *compositingMode);
_GdipGetCompositingMode = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetCompositingMode', windll.gdiplus))
def GdipGetCompositingMode(graphics):
    """Return (status, compositing mode as int)."""
    compositingMode = c_int()
    status = _GdipGetCompositingMode(graphics, byref(compositingMode))
    return status, compositingMode.value
#GpStatus WINGDIPAPI GdipSetRenderingOrigin(GpGraphics *graphics, INT x, INT y);
GdipSetRenderingOrigin = WINFUNCTYPE(c_int, c_void_p, c_int, c_int)(('GdipSetRenderingOrigin', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetRenderingOrigin(GpGraphics *graphics, INT *x, INT *y);
_GdipGetRenderingOrigin = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p)(('GdipGetRenderingOrigin', windll.gdiplus))
def GdipGetRenderingOrigin(graphics):
    """Return (status, x, y) of the rendering origin."""
    x, y = c_int(), c_int()
    status = _GdipGetRenderingOrigin(graphics, byref(x), byref(y))
    return status, x.value, y.value
#GpStatus WINGDIPAPI GdipSetCompositingQuality(GpGraphics *graphics, CompositingQuality compositingQuality);
GdipSetCompositingQuality = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetCompositingQuality', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetCompositingQuality(GpGraphics *graphics, CompositingQuality *compositingQuality);
_GdipGetCompositingQuality = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetCompositingQuality', windll.gdiplus))
def GdipGetCompositingQuality(graphics):
    """Return (status, compositing quality as int)."""
    compositingQuality = c_int()
    status = _GdipGetCompositingQuality(graphics, byref(compositingQuality))
    return status, compositingQuality.value
#GpStatus WINGDIPAPI GdipSetSmoothingMode(GpGraphics *graphics, SmoothingMode smoothingMode);
GdipSetSmoothingMode = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetSmoothingMode', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetSmoothingMode(GpGraphics *graphics, SmoothingMode *smoothingMode);
_GdipGetSmoothingMode = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetSmoothingMode', windll.gdiplus))
def GdipGetSmoothingMode(graphics):
    """Return (status, smoothing mode as int)."""
    smoothingMode = c_int()
    status = _GdipGetSmoothingMode(graphics, byref(smoothingMode))
    return status, smoothingMode.value
#GpStatus WINGDIPAPI GdipSetPixelOffsetMode(GpGraphics* graphics, PixelOffsetMode pixelOffsetMode);
GdipSetPixelOffsetMode = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetPixelOffsetMode', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetPixelOffsetMode(GpGraphics *graphics, PixelOffsetMode *pixelOffsetMode);
_GdipGetPixelOffsetMode = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetPixelOffsetMode', windll.gdiplus))
def GdipGetPixelOffsetMode(graphics):
    """Return (status, pixel offset mode as int)."""
    pixelOffsetMode = c_int()
    status = _GdipGetPixelOffsetMode(graphics, byref(pixelOffsetMode))
    return status, pixelOffsetMode.value
#GpStatus WINGDIPAPI GdipSetTextRenderingHint(GpGraphics *graphics, TextRenderingHint mode);
GdipSetTextRenderingHint = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetTextRenderingHint', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetTextRenderingHint(GpGraphics *graphics, TextRenderingHint *mode);
_GdipGetTextRenderingHint = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetTextRenderingHint', windll.gdiplus))
def GdipGetTextRenderingHint(graphics):
    """Return (status, text rendering hint as int)."""
    mode = c_int()
    status = _GdipGetTextRenderingHint(graphics, byref(mode))
    return status, mode.value
#GpStatus WINGDIPAPI GdipSetTextContrast(GpGraphics *graphics, UINT contrast);
GdipSetTextContrast = WINFUNCTYPE(c_int, c_void_p, c_uint)(('GdipSetTextContrast', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetTextContrast(GpGraphics *graphics, UINT * contrast);
_GdipGetTextContrast = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetTextContrast', windll.gdiplus))
def GdipGetTextContrast(graphics):
    """Return (status, text contrast as unsigned int)."""
    contrast = c_uint()
    status = _GdipGetTextContrast(graphics, byref(contrast))
    return status, contrast.value
#GpStatus WINGDIPAPI GdipSetInterpolationMode(GpGraphics *graphics, InterpolationMode interpolationMode);
GdipSetInterpolationMode = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetInterpolationMode', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetInterpolationMode(GpGraphics *graphics, InterpolationMode *interpolationMode);
_GdipGetInterpolationMode = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetInterpolationMode', windll.gdiplus))
def GdipGetInterpolationMode(graphics):
    """Return (status, interpolation mode as int)."""
    interpolationMode = c_int()
    status = _GdipGetInterpolationMode(graphics, byref(interpolationMode))
    return status, interpolationMode.value
# World/page transform bindings -- called directly with handles and ints/floats.
#GpStatus WINGDIPAPI GdipSetWorldTransform(GpGraphics *graphics, GpMatrix *matrix);
GdipSetWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipSetWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipResetWorldTransform(GpGraphics *graphics);
GdipResetWorldTransform = WINFUNCTYPE(c_int, c_void_p)(('GdipResetWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipMultiplyWorldTransform(GpGraphics *graphics, GDIPCONST GpMatrix *matrix, GpMatrixOrder order);
GdipMultiplyWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int)(('GdipMultiplyWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipTranslateWorldTransform(GpGraphics *graphics, REAL dx, REAL dy, GpMatrixOrder order);
GdipTranslateWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_float, c_float, c_int)(('GdipTranslateWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipScaleWorldTransform(GpGraphics *graphics, REAL sx, REAL sy, GpMatrixOrder order);
GdipScaleWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_float, c_float, c_int)(('GdipScaleWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipRotateWorldTransform(GpGraphics *graphics, REAL angle, GpMatrixOrder order);
GdipRotateWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_float, c_int)(('GdipRotateWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetWorldTransform(GpGraphics *graphics, GpMatrix *matrix);
GdipGetWorldTransform = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetWorldTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipResetPageTransform(GpGraphics *graphics);
GdipResetPageTransform = WINFUNCTYPE(c_int, c_void_p)(('GdipResetPageTransform', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetPageUnit(GpGraphics *graphics, GpUnit *unit);
GdipGetPageUnit = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetPageUnit', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetPageScale(GpGraphics *graphics, REAL *scale);
GdipGetPageScale = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetPageScale', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSetPageUnit(GpGraphics *graphics, GpUnit unit);
GdipSetPageUnit = WINFUNCTYPE(c_int, c_void_p, c_int)(('GdipSetPageUnit', windll.gdiplus))
#GpStatus WINGDIPAPI GdipSetPageScale(GpGraphics *graphics, REAL scale);
GdipSetPageScale = WINFUNCTYPE(c_int, c_void_p, c_float)(('GdipSetPageScale', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetDpiX(GpGraphics *graphics, REAL* dpi);
GdipGetDpiX = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetDpiX', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetDpiY(GpGraphics *graphics, REAL* dpi);
GdipGetDpiY = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetDpiY', windll.gdiplus))
#GpStatus WINGDIPAPI GdipTransformPoints(GpGraphics *graphics, GpCoordinateSpace destSpace, GpCoordinateSpace srcSpace, GpPointF *points, INT count);
GdipTransformPoints = WINFUNCTYPE(c_int, c_void_p, c_int, c_int, c_void_p, c_int)(('GdipTransformPoints', windll.gdiplus))
#GpStatus WINGDIPAPI GdipTransformPointsI(GpGraphics *graphics, GpCoordinateSpace destSpace, GpCoordinateSpace srcSpace, GpPoint *points, INT count);
GdipTransformPointsI = WINFUNCTYPE(c_int, c_void_p, c_int, c_int, c_void_p, c_int)(('GdipTransformPointsI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipGetNearestColor(GpGraphics *graphics, ARGB* argb);
GdipGetNearestColor = WINFUNCTYPE(c_int, c_void_p, c_void_p)(('GdipGetNearestColor', windll.gdiplus))
# Creates the Win9x Halftone Palette (even on NT) with correct Desktop colors
#HPALETTE WINGDIPAPI GdipCreateHalftonePalette();
# Note: returns a palette handle directly, not a GpStatus like its siblings.
GdipCreateHalftonePalette = WINFUNCTYPE(c_void_p)(('GdipCreateHalftonePalette', windll.gdiplus))
# Drawing primitives: the ...I suffix marks the integer-coordinate variants.
#GpStatus WINGDIPAPI GdipDrawLine(GpGraphics *graphics, GpPen *pen, REAL x1, REAL y1, REAL x2, REAL y2);
GdipDrawLine = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float)(('GdipDrawLine', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawLineI(GpGraphics *graphics, GpPen *pen, INT x1, INT y1, INT x2, INT y2);
GdipDrawLineI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int)(('GdipDrawLineI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawLines(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPointF *points, INT count);
GdipDrawLines = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawLines', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawLinesI(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPoint *points, INT count);
GdipDrawLinesI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawLinesI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawArc(GpGraphics *graphics, GpPen *pen, REAL x, REAL y, REAL width, REAL height, REAL startAngle, REAL sweepAngle);
GdipDrawArc = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float)(('GdipDrawArc', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawArcI(GpGraphics *graphics, GpPen *pen, INT x, INT y, INT width, INT height, REAL startAngle, REAL sweepAngle);
GdipDrawArcI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int, c_float, c_float)(('GdipDrawArcI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawBezier(GpGraphics *graphics, GpPen *pen, REAL x1, REAL y1, REAL x2, REAL y2, REAL x3, REAL y3, REAL x4, REAL y4);
GdipDrawBezier = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float, c_float, c_float, c_float, c_float)(('GdipDrawBezier', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawBezierI(GpGraphics *graphics, GpPen *pen, INT x1, INT y1, INT x2, INT y2, INT x3, INT y3, INT x4, INT y4);
GdipDrawBezierI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int, c_int, c_int, c_int, c_int)(('GdipDrawBezierI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawBeziers(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPointF *points, INT count);
GdipDrawBeziers = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawBeziers', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawBeziersI(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPoint *points, INT count);
GdipDrawBeziersI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawBeziersI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawRectangle(GpGraphics *graphics, GpPen *pen, REAL x, REAL y, REAL width, REAL height);
GdipDrawRectangle = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float)(('GdipDrawRectangle', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawRectangleI(GpGraphics *graphics, GpPen *pen, INT x, INT y, INT width, INT height);
GdipDrawRectangleI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int)(('GdipDrawRectangleI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawRectangles(GpGraphics *graphics, GpPen *pen, GDIPCONST GpRectF *rects, INT count);
GdipDrawRectangles = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawRectangles', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawRectanglesI(GpGraphics *graphics, GpPen *pen, GDIPCONST GpRect *rects, INT count);
GdipDrawRectanglesI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawRectanglesI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawEllipse(GpGraphics *graphics, GpPen *pen, REAL x, REAL y, REAL width, REAL height);
GdipDrawEllipse = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float)(('GdipDrawEllipse', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawEllipseI(GpGraphics *graphics, GpPen *pen, INT x, INT y, INT width, INT height);
GdipDrawEllipseI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int)(('GdipDrawEllipseI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawPie(GpGraphics *graphics, GpPen *pen, REAL x, REAL y, REAL width, REAL height, REAL startAngle, REAL sweepAngle);
GdipDrawPie = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float)(('GdipDrawPie', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawPieI(GpGraphics *graphics, GpPen *pen, INT x, INT y, INT width, INT height, REAL startAngle, REAL sweepAngle);
GdipDrawPieI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int, c_float, c_float)(('GdipDrawPieI', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawPolygon(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPointF *points, INT count);
GdipDrawPolygon = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawPolygon', windll.gdiplus))
#GpStatus WINGDIPAPI GdipDrawPolygonI(GpGraphics *graphics, GpPen *pen, GDIPCONST GpPoint *points, INT count);
GdipDrawPolygonI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_void_p, c_int)(('GdipDrawPolygonI', windll.gdiplus))
#...........
#GpStatus WINGDIPAPI GdipFillEllipse(GpGraphics *graphics, GpBrush *brush, REAL x, REAL y, REAL width, REAL height);
GdipFillEllipse = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_float, c_float, c_float, c_float)(('GdipFillEllipse', windll.gdiplus))
#GpStatus WINGDIPAPI GdipFillEllipseI(GpGraphics *graphics, GpBrush *brush, INT x, INT y, INT width, INT height);
GdipFillEllipseI = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int, c_int)(('GdipFillEllipseI', windll.gdiplus))
|
Answeror/lit
|
pywingui/gdiplusflat.py
|
Python
|
mit
| 24,068
|
from SublimeLinter.lint import Linter, util
class JSL(Linter):
    """SublimeLinter plugin that runs JavaScript Lint (jsl) over the buffer."""
    # jsl reads the buffer from stdin; -nologo/-nosummary suppress banner text
    # so the regex below only has to parse the error records.
    cmd = 'jsl -stdin -nologo -nosummary'
    # jsl reports each problem as a three-line record; `multiline` below lets
    # this verbose regex consume all three lines at once.
    regex = r'''(?xi)
        # First line is (lineno): type: error message
        ^\((?P<line>\d+)\):.*?(?:(?P<warning>warning)|(?P<error>error)):\s*(?P<message>.+)$\r?\n
        # Second line is the line of code
        ^.*$\r?\n
        # Third line is a caret pointing to the position of the error
        ^(?P<col>[^\^]*)\^
    '''
    multiline = True
    # jsl writes its findings to stdout, not stderr.
    error_stream = util.STREAM_STDOUT
    defaults = {
        'selector': 'source.js - meta.attribute-with-value'
    }
|
SublimeLinter/SublimeLinter-jsl
|
linter.py
|
Python
|
mit
| 589
|
"""
WSGI config for eatuxchallenge project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eatuxchallenge.settings")
application = get_wsgi_application()
|
ask5/eatuxchallenge
|
eatuxchallenge/wsgi.py
|
Python
|
mit
| 405
|
from signals import post_save_renderer
|
botify-labs/django-medusa
|
django_medusa/models.py
|
Python
|
mit
| 39
|
# Python3
from solution1 import sumUpNumbers as f
# (input, expected-output) pairs exercising sumUpNumbers: the function should
# sum every maximal run of digits found in the string.
qa = [
    ('2 apples, 12 oranges', 14),
    ('123450', 123450),
    ('Your payment method is invalid', 0),
    ('no digits at all', 0),
    ('there are some (12) digits 5566 in this 770 string 239', 6587)
]
# Each row unpacks into the argument list *q and the expected answer a,
# so the harness also works for multi-argument solutions.
for *q, a in qa:
    for i, e in enumerate(q):
        print('input{0}: {1}'.format(i + 1, e))
    ans = f(*q)
    if ans != a:
        print(' [failed]')
        print(' output:', ans)
        print(' expected:', a)
    else:
        print(' [ok]')
        print(' output:', ans)
    print()
|
RevansChen/online-judge
|
Codefights/arcade/intro/level-12/54.sumUpNumbers/Python/test.py
|
Python
|
mit
| 565
|
__author__ = 'jie'
import string
from operator import attrgetter
from strandrep.create_toehold_command import CreateToeholdCommand
from strandrep.remove_toehold_command import RemoveToeholdCommand
import cadnano.util as util
from cadnano.cnproxy import ProxyObject, ProxySignal
class Domain(ProxyObject):
    """A contiguous stretch of bases on one strand set of a virtual helix.

    Domains are linked two ways:
      * ``_domain_3p`` / ``_domain_5p`` -- neighbouring nodes in the
        scaffold/staple linked list on the same helix;
      * ``_connection_3p`` / ``_connection_5p`` -- connections (possibly
        crossovers) to other domains.
    The class also tracks toehold bookkeeping and supports merging two
    adjacent domains into one.
    """

    def __init__(self, strandset, low_idx, high_idx, bs_low=None, bs_high=None, hyb_strand=None):
        super(Domain, self).__init__(strandset)
        self._doc = strandset.document()
        self._hyb_strand_idx = hyb_strand
        self._index = strandset.length()
        self._vh = strandset._virtual_helix
        self._vhNum = self._vh._number
        self._type = strandset._strand_type
        # domain naming: scaffold -> 'a0', staple -> 'a0*', overhang -> 'T0...'
        if self._type == 0:
            self._type_str = 'scaf'
            self._name = string.ascii_lowercase[self._index] + str(self._vhNum)
        elif self._type == 1:
            self._type_str = 'stap'
            self._name = string.ascii_lowercase[self._index] + str(self._vhNum) + '*'
        else:
            # NOTE(review): this overwrites self._type instead of setting
            # self._type_str like the branches above -- looks unintentional,
            # but is preserved as-is; confirm before changing.
            self._type = 'overhang'
            self._name = 'T' + str(self._vhNum) + str(self._index)  # fix naming
        self._sequence = None
        # coordinates and indexes
        self._bs_low = bs_low
        self._bs_high = bs_high
        self._strandset = strandset
        self._low_idx = low_idx
        self._high_idx = high_idx
        self._length = high_idx - low_idx + 1
        # domain3p and domain5p refer to previous and proceeding node in scaf/stap linked list
        self._domain_3p = None
        self._domain_5p = None
        # connection3p and connection5p refer to xover to different domains
        self._connection_5p = None
        self._connection_3p = None
        self._is_5p_connection_xover = None
        self._is_3p_connection_xover = None
        self._hyb_domain = None
        self._unhyb_domain = 0
        # orientation on virtual helix: alias the low/high accessors onto the
        # 5'/3' accessors according to the drawing direction
        self._is_drawn_5_to_3 = self._strandset.isDrawn5to3()
        if self._is_drawn_5_to_3:
            self.idx5Prime = self.lowIdx
            self.idx3Prime = self.highIdx
            self.connectionLow = self.connection5p
            self.connectionHigh = self.connection3p
            self.setConnectionLow = self.setConnection5p
            self.setConnectionHigh = self.setConnection3p
            self.setIsConnectionHighXover = self.setIs3pConnectionXover
            self.setIsConnectionLowXover = self.setIs5pConnectionXover
            self.isConnectionHighXover = self.is3pConnectionXover
            self.isConnectionLowXover = self.is5pConnectionXover
        else:
            self.idx5Prime = self.highIdx
            self.idx3Prime = self.lowIdx
            self.connectionLow = self.connection3p
            self.connectionHigh = self.connection5p
            self.setConnectionLow = self.setConnection3p
            self.setConnectionHigh = self.setConnection5p
            self.setIsConnectionHighXover = self.setIs5pConnectionXover
            self.setIsConnectionLowXover = self.setIs3pConnectionXover
            self.isConnectionHighXover = self.is5pConnectionXover
            self.isConnectionLowXover = self.is3pConnectionXover
        # Fixed: guard against hyb_strand=None (the default) -- 'None > 0'
        # raises TypeError on Python 3; None still counts as a loop, as before.
        if self._hyb_strand_idx is not None and self._hyb_strand_idx > 0:
            self._loop = False
        else:
            self._loop = True
        # properties used for creating toehold
        self._toehold_list_3p = None
        self._toehold_list_5p = None
        self._last_toehold_cmd = None
        # refers to the 5' or 3' domain actually operated on in creating toehold
        self._endDomain = None

    ### Signals
    # (the lowercase 'r' in toeholdremovedSignal is kept as-is because slots
    # connect to it by this attribute name)
    togglePreviewRipOffSignal = ProxySignal(name="togglePreviewRipOffSignal") #previewRipOffSlot in strand item, toggle hide/show of strand item
    toeholdremovedSignal = ProxySignal(object,object,name='toeholdRemovedSignal') # toeholdRemovedSlot in strand item
    toeholdAddedSignal = ProxySignal(object,object,name = 'toeholdAddedSignal') # toeholdAddedSlot in strand item
    strandHasNewOligoSignal = ProxySignal(ProxyObject, name='strandHasNewOligoSignal') # hasNewOligoSlot in abstract strand item
    strandUpdateSignal = ProxySignal(ProxyObject, name='strandUpdateSignal') #pyqtSignal(QObject)
    strandRemovedSignal = ProxySignal(ProxyObject, name='strandRemovedSignal') #pyqtSignal(QObject) # strand
    strandSelectedSignal = ProxySignal(ProxyObject,name='strandSelectedSignal')
    strandRemovedFromSelectionSignal = ProxySignal(ProxyObject,name='strandRemovedFromSelectionSignal')

    def strandFilter(self):
        return self._strandset.strandFilter()

    def setName(self, scaf_index):
        """Rename this domain; scaf_index == -1 marks an unhybridized domain."""
        if scaf_index == -1:
            self._name = str(self._vhNum) + "M" + str(self._unhyb_domain)
            self._unhyb_domain += 1
        else:
            try:
                self._name = string.ascii_lowercase[scaf_index] + str(self._vhNum) + "*"
            # Fixed: catch only the expected failure (scaf_index beyond 'z')
            # instead of a bare except that hid every other error.
            except IndexError:
                print("self_idx = %d, vh = %d" %(self.lowIdx(),self._vhNum))

    def name(self):
        return self._name

    def oligo(self):
        # (a second identical definition of oligo() later in the original
        # class body was removed as a duplicate)
        return self._oligo

    def isStaple(self):
        return self._strandset._strand_type == 1

    def generator5pStrand(self):
        """Yield this domain then each 5' connection, stopping on a loop."""
        node0 = node = self
        f = attrgetter('_connection_5p')
        while node:
            yield node  # equivalent to: node = node._strand5p
            node = f(node)
            if node0 == node:
                break

    def generator3pStrand(self):
        # return a generator of all strand linked in the 3' direction
        node0 = node = self
        f = attrgetter('_connection_3p')
        while node:
            yield node  # equivalent to: node = node._strand5p
            node = f(node)
            if node0 == node:
                break

    def connection3p(self):
        return self._connection_3p

    def connection5p(self):
        return self._connection_5p

    def idxs(self):
        return (self._low_idx, self._high_idx)

    def lowIdx(self):
        return self._low_idx
    # end def

    def highIdx(self):
        return self._high_idx
    # end def

    def strandSet(self):
        return self._strandset

    def setConnection3p(self, strand):
        self._connection_3p = strand
    # end def

    def setConnection5p(self, strand):
        self._connection_5p = strand
    # end def

    def length(self):
        return self._length

    def virtualHelix(self):
        return self._vh

    def document(self):
        return self._doc

    def setDomain5p(self, domain):
        self._domain_5p = domain

    def setDomain3p(self, domain):
        self._domain_3p = domain

    def setToehold3p(self, toeholdList):
        self._toehold_list_3p = toeholdList

    def setToehold5p(self, toeholdList):
        self._toehold_list_5p = toeholdList

    def toeholdList3p(self):
        return self._toehold_list_3p

    def toeholdList5p(self):
        return self._toehold_list_5p

    def setOligo(self, new_oligo, emit_signal=True):
        self._oligo = new_oligo
        if emit_signal:
            self.strandHasNewOligoSignal.emit(self)

    def totalLength(self):
        """
        includes the length of insertions in addition to the bases
        """
        tL = 0
        insertions = self.insertionsOnStrand()
        for insertion in insertions:
            tL += insertion.length()
        return tL + self.length()

    def insertionsOnStrand(self, idxL=None, idxH=None):
        """
        if passed indices it will use those as a bounds
        """
        insertions = []
        coord = self.virtualHelix().coord()
        insertionsDict = self.part().insertions()[coord]
        sortedIndices = sorted(insertionsDict.keys())
        if idxL is None:
            idxL, idxH = self.idxs()
        for index in sortedIndices:
            insertion = insertionsDict[index]
            if idxL <= insertion.idx() <= idxH:
                insertions.append(insertion)
            # end if
        # end for
        return insertions

    def part(self):
        return self._strandset.part()

    def sequence(self, for_export=False):
        seq = self._sequence
        if seq:
            return util.markwhite(seq) if for_export else seq
        elif for_export:
            # unknown sequence: one '?' placeholder per base, insertions included
            return '?' * self.totalLength()
        return ''
    # end def

    def isScaffold(self):
        return self._strandset.isScaffold()

    def is5pConnectionXover(self):
        if self._is_5p_connection_xover is not None:
            return self._is_5p_connection_xover
        else:
            return False

    def is3pConnectionXover(self):
        if self._is_3p_connection_xover is not None:
            return self._is_3p_connection_xover
        else:
            return False

    def setIs5pConnectionXover(self,bool):
        self._is_5p_connection_xover = bool

    def setIs3pConnectionXover(self,bool):
        self._is_3p_connection_xover = bool

    def toehold3p(self):
        return self._toehold_list_3p

    def toehold5p(self):
        return self._toehold_list_5p

    def toeholdChanged(self, prime, checked=True):
        # called by dock widget to delete/add toehold
        '''
        doesn't create/delete toehold on self;
        instead, get corresponding end domain on oligo and apply operation on end domain;
        '''
        if prime == 3:
            endDomain = self._oligo._domain3p
        else:
            endDomain = self._oligo._domain5p
        self._endDomain = endDomain
        # pending toehold commands keyed by prime (local renamed so it no
        # longer shadows the dict builtin)
        cmd_dict = self._oligo._toehold_cmd_dict
        cmd = CreateToeholdCommand(self._vh, endDomain, prime)
        if checked:  # create new toehold at a prime
            cmd.redo()
            cmd_dict[prime] = cmd
        else:  # remove a toehold at a prime
            cmd.undo()
            cmd_dict[prime] = None

    def createToehold(self, toehold, use_undostack=True):
        # put removed toeholds back, no need to check if_can_create_toehold
        cmd = CreateToeholdCommand(self._vh, toehold._domain, toehold._prime)
        cmds = [cmd]
        # Fixed: the original called .format() on a %-style template, which
        # returned the template unchanged; use %-interpolation instead.
        d = "createToehold %s at %s" % (toehold._name, toehold._domain._name)
        util.execCommandList(self, cmds, d, use_undostack=use_undostack)

    def hasToehold(self):
        return (self.toehold3p() is not None) or (self.toehold5p() is not None)

    def toeholds(self):
        t = []
        if self.toehold3p():
            t.append(self.toehold3p())
        if self.toehold5p():
            t.append(self.toehold5p())
        return t

    def canCreateToeholdAt(self, prime):
        '''
        return true if endDomain has no connection to other domains at prime and no
        toehold has already been created at prime
        '''
        if self._oligo._is_loop:
            return False
        # walk to the terminal domain in the requested direction, caching it
        # on the oligo as a side effect
        if prime == 3:
            curr = curr0 = self
            while curr._connection_3p is not None:
                curr = curr._connection_3p
                if curr == curr0:
                    break
            self._oligo._domain3p = curr
            return (curr.connection3p() is None) and (curr.toehold3p() is None)
        elif prime == 5:
            curr = curr0 = self
            while curr._connection_5p is not None:
                curr = curr._connection_5p
                if curr == curr0:
                    break
            self._oligo._domain5p = curr
            return (curr.connection5p() is None) and (curr.toehold5p() is None)

    def toeholdChangeAccepted(self):
        # triggered when user accepts a toehold operation
        cmd_dict = self._oligo._toehold_cmd_dict
        stack = []
        '''
        reset all toehold command state to None in oligo command dict;
        undo all executed command, command will be redo-ed after pushed onto undo stack;
        '''
        # items() replaces the Python-2-only iteritems()
        for prime, cmd in cmd_dict.items():
            if cmd is not None:
                stack.append(cmd)
                cmd.undo()
                cmd_dict[prime] = None
        d = '%s create toehold' % self._name
        # record the accepted sequence of command as a macro
        util.execCommandList(self, stack, d, use_undostack=True)

    def toeholdChangeRejected(self):
        # undo all executed command
        if self._endDomain is None:
            return
        cmd_dict = self._oligo._toehold_cmd_dict
        for prime, cmd in cmd_dict.items():
            if cmd is not None:
                cmd.undo()
                cmd_dict[prime] = None

    def removeToehold(self, toehold, use_undostack=True):
        cmd = RemoveToeholdCommand(self, toehold)
        stack = [cmd]
        d = '%s remove toehold' % self._name
        util.execCommandList(self, stack, d, use_undostack=use_undostack)

    def undoStack(self):
        return self._strandset.undoStack()

    def merge_with(self, domain):
        '''
        method to merge self with another domain;
        Need to: adjust index; rm second domain; rm tick mark (emit signal to notify strand_item
        after model is changed); inherit connections, toeholds,domain3p/5p from second domain;
        '''
        # adjust indices & domain_3p/5p reference
        if self._low_idx > domain._low_idx:
            self._low_idx = domain._low_idx
            if self._is_drawn_5_to_3:
                self._domain_5p = domain._domain_5p
                if self._domain_5p:
                    self._domain_5p._domain_3p = self
            else:
                self._domain_3p = domain._domain_3p
                if self._domain_3p:
                    self._domain_3p._domain_5p = self
        else:
            self._high_idx = domain._high_idx
            if self._is_drawn_5_to_3:
                self._domain_3p = domain._domain_3p
                if self._domain_3p:
                    self._domain_3p._domain_5p = self
            else:
                self._domain_5p = domain._domain_5p
                if self._domain_5p:
                    self._domain_5p._domain_3p = self
        # length
        self._length = self.length() + domain.length()
        # domain name
        self._name = self._name + "_" + domain._name
        # toehold and connection
        if domain.connection3p() and domain.connection3p() != self:
            self.setConnection3p(domain.connection3p())
            self.setIs3pConnectionXover(domain.is3pConnectionXover())
        if domain.connection5p() and domain.connection5p() != self:
            self.setConnection5p(domain.connection5p())
            self.setIs5pConnectionXover(domain.is5pConnectionXover())
        if not domain.connection3p():
            self.setConnection3p(None)
        if not domain.connection5p():
            self.setConnection5p(None)
        if domain.toehold3p():
            self.setToehold3p(domain.toehold3p())
        if domain.toehold5p():
            self.setToehold5p(domain.toehold5p())
        # destroy second strand, refresh oligo
        domain.strandRemovedSignal.emit(domain)
        self.strandUpdateSignal.emit(self)
|
amylittleyang/OtraCAD
|
strandrep/domain.py
|
Python
|
mit
| 15,203
|
from configparser import ConfigParser
from astropy.utils.console import color_print
import os
import numpy as np
# Load the pipeline configuration; fall back to the copy shipped next to this
# module (printing a warning banner) when zparams_pm.py is not in the CWD.
params = ConfigParser()
if os.path.isfile('zparams_pm.py'):
    params.read('zparams_pm.py')
else:
    folder = os.path.dirname(os.path.realpath(__file__))
    params.read('%s/zparams_pm.py' % folder)
    # Fixed: the module imports the Python 3 `configparser`, but these two
    # lines used Python 2 print-statement syntax ("print ''"), which is a
    # SyntaxError on Python 3.
    print('')
    color_print('###############################################', 'yellow')
    color_print('#', 'yellow', ' ', 'lightcyan', '#', 'yellow')
    color_print('#', 'yellow', ' zparams_pm.py no encontrado en la carpeta ', 'white', '#', 'yellow')
    color_print('#', 'yellow', '\033[1m' + ' Utilizando parametros por defecto ', 'lightred', '#', 'yellow')
    color_print('#', 'yellow', ' ', 'lightcyan', '#', 'yellow')
    color_print('###############################################', 'yellow')
    print('')
#Regresion Lineal
def linear_regression(x, y, w):
    """Weighted least-squares fit of a straight line to (x, y).

    Returns (mu_w, sig_w): the coefficient vector [slope, intercept] and
    its covariance matrix (the inverse of the weighted normal matrix).
    """
    design = np.vander(x, 2)          # columns: [x, 1]
    weights = np.diag(w)
    normal = design.T.dot(weights).dot(design)   # A^T W A
    covariance = np.linalg.inv(normal)
    coefficients = np.linalg.solve(normal, design.T.dot(weights.dot(y)))
    return coefficients, covariance
def get_script():
    """Collect the options used by script.py from several config sections."""
    r, n_iter, err_max = np.array(params['SCRIPT.PY'].values())
    r = float(r)
    n_iter = int(n_iter)
    err_max = float(err_max)
    out = str(params['TLINEAL_1a1.PY']['output'])
    ref = str(params['TLINEAL_1a1.PY']['refer'])
    n_frames = int(params['PM_1a1.PY']['nframes'])
    ep_min = int(params['VPDHmag.PY']['min_ep'])
    return r, n_iter, out, ref, n_frames, ep_min, err_max
def get_VPDHmag():
    """Unpack the VPDHmag.PY section; magnitudes come back as one tuple."""
    values = np.array(params['VPDHmag.PY'].values()).astype('float')
    limplot, mag_lo, mag_hi, delta_m, ep_min, nei_min, err_sigma = values
    return limplot, (mag_lo, mag_hi), delta_m, ep_min, nei_min, err_sigma
def get_tlineal():
    # Unpack every TLINEAL_1a1.PY option positionally: the section must keep
    # exactly this key order for the unpacking below to stay correct.
    nrefstars_tl, min_nei, rad_int, rad_ext, output, refer, sort_mag, \
    local, ma1, ma2, mr1, mr2, mp1, mp2, rad_ref, x0, y0, lim, plot_ep, plot_del_ep, plot_del_xy, nprocs_tl = params['TLINEAL_1a1.PY'].values()
    # Numeric options are bulk-converted to float; the remaining options are
    # either strings or 'True'/'False' flags parsed below.
    nrefstars_tl, min_nei, rad_int, rad_ext, ma1, ma2, mr1, mr2, mp1, mp2, rad_ref, x0, y0, lim, nprocs_tl = \
    np.array([nrefstars_tl, min_nei, rad_int, rad_ext, ma1, ma2, mr1, mr2, mp1, mp2, rad_ref, x0, y0, lim, nprocs_tl]).astype(float)
    refer = str(refer)
    output = str(output)
    nprocs_tl = int(nprocs_tl)
    sort_mag = str(sort_mag) == 'True'
    local = str(local) == 'True'
    plot_ep = str(plot_ep) == 'True'
    plot_del_ep = str(plot_del_ep) == 'True'
    plot_del_xy = str(plot_del_xy) == 'True'
    return nrefstars_tl, min_nei, rad_int, rad_ext, output, refer, sort_mag, \
    local, ma1, ma2, mr1, mr2, mp1, mp2, rad_ref, x0, y0, lim, plot_ep, plot_del_ep, plot_del_xy, nprocs_tl
def get_CMD():
    """Read the CMD.PY section: mode, match file, axis limits, pdf flag."""
    modo, match_file, c1, c2, m1, m2, pdf_flag = params['CMD.PY'].values()
    c1, c2, m1, m2 = np.array([c1, c2, m1, m2]).astype(float)
    return str(modo), str(match_file), c1, c2, m1, m2, str(pdf_flag) == 'True'
def get_master_stilts():
    """Return (match tolerance, mode) for master_stilts.py."""
    section = params['MASTER_STILTS.PY']
    return float(section['match_tol']), str(section['modo_ms'])
def get_master_match():
    """Return (minimum epochs, worker count) for master_match_id.py."""
    section = params['MASTER_MATCH_ID.PY']
    return int(section['min_epochs_mm']), int(section['nprocs_mmi'])
def get_mastercat():
    """Unpack MASTERCAT.PY; everything is float except iteracion2 (string)."""
    (ep_min, mag_lo, mag_hi, err_max,
     iters, iter2, n_ref, n_procs) = params['MASTERCAT.PY'].values()
    ep_min, mag_lo, mag_hi, err_max, iters, n_ref, n_procs = \
        np.array([ep_min, mag_lo, mag_hi, err_max, iters, n_ref, n_procs]).astype(float)
    return ep_min, mag_lo, mag_hi, err_max, iters, str(iter2), n_ref, n_procs
def get_match1a1():
    """Return (mode, tolerance, worker count) for match_1a1.py."""
    section = params['MATCH_1a1.PY']
    return str(section['modo_ma']), float(section['tol']), int(section['nprocs_m1a1'])
def get_pm1a1():
    """Unpack PM_1a1.PY: counts come back as ints, `weight` as a bool."""
    (n_frames, n_bins, lim_plot, n_procs,
     iter_sig, n_sigma, use_weight) = params['PM_1a1.PY'].values()
    n_frames, n_bins, lim_plot, n_procs, iter_sig, n_sigma = \
        np.array([n_frames, n_bins, lim_plot, n_procs, iter_sig, n_sigma]).astype(float)
    return (n_frames, n_bins, lim_plot, int(n_procs),
            int(iter_sig), int(n_sigma), str(use_weight) == 'True')
def get_XYtoRADEC():
    """Return the XYtoRADEC.PY options as a float array, as callers expect."""
    return np.array(params['XYtoRADEC.PY'].values()).astype(float)
#ProgressBar con multiprocess (elegir numero de procesadores)
from astropy.utils.console import ProgressBar
import multiprocessing
def barra(funcion, items, cpus):
    """Map `funcion` over `items` on `cpus` workers with a progress bar.

    Results are collected in completion order from Pool.imap, which
    preserves input order.
    """
    collected = []
    with ProgressBar(len(items)) as bar:
        pool = multiprocessing.Pool(processes=cpus)
        for done_count, outcome in enumerate(pool.imap(funcion, items)):
            bar.update(done_count)
            collected.append(outcome)
        pool.close()
        pool.join()
    return collected
|
astrofelipe/MP2015
|
pm_funcs.py
|
Python
|
mit
| 5,286
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .property_batch_operation import PropertyBatchOperation
class DeletePropertyBatchOperation(PropertyBatchOperation):
    """Represents a PropertyBatchOperation that deletes a specified property if it
    exists. Note that if one PropertyBatchOperation in a PropertyBatch fails,
    the entire batch fails and cannot be committed in a transactional manner.
    .
    :param property_name:
    :type property_name: str
    :param kind: Polymorphic Discriminator
    :type kind: str
    """
    # msrest validation map: both fields must be present on the wire.
    _validation = {
        'property_name': {'required': True},
        'kind': {'required': True},
    }
    def __init__(self, property_name):
        super(DeletePropertyBatchOperation, self).__init__(property_name=property_name)
        # Polymorphic discriminator marking this batch operation as a delete.
        self.kind = 'Delete'
|
AutorestCI/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation.py
|
Python
|
mit
| 1,235
|
import cPickle as pickle
import h5py
import numpy as np
# Convert a pickled CIFAR-100-style batch (raw data plus two ZCA-whitened
# variants) into a single HDF5 file with one group per variant.
fName = "test.pickle"
with open(fName,"rb") as f:
    d = pickle.load(f)
print d.keys()
h5f = h5py.File(fName.replace(".pickle",".h5f"),"w")
h5f.create_group("Info")
h5f.create_group("Original")
h5f.create_group("ZCA_byTrainData")
h5f.create_group("ZCA_byTestData")
h5f.create_dataset("ZCA_byTrainData/data" ,data=d["data_ZCA_byTrainData"])
h5f.create_dataset("Original/data"        ,data=d["data"])
h5f.create_dataset("ZCA_byTestData/data"  ,data=d["data_ZCA_byTestData"])
h5f.create_dataset("Info/coarse_labels",data=np.array(d["coarse_labels"],dtype=np.int32))
h5f.create_dataset("Info/fine_labels"  ,data=np.array(d["fine_labels"],dtype=np.int32))
#h5f.create_dataset("Info/labels"  ,data=np.array(d["labels"],dtype=np.int32))
# Strings need h5py's variable-length unicode dtype; plain numpy unicode
# arrays (commented out below) do not round-trip through HDF5.
dunicode = h5py.special_dtype(vlen=unicode)
h5f.create_dataset("Info/filenames"    ,data=d["filenames"],dtype=dunicode)
h5f.create_dataset("Info/batch_label"  ,data=d["batch_label"],dtype=dunicode)
#h5f.create_dataset("Info/filenames"    ,data=np.array(d["filenames"],dtype=np.unicode))
#h5f.create_dataset("Info/batch_label"  ,data=np.array([d["batch_label"]],dtype=np.unicode))
# Store the ZCA whitening parameters alongside each whitened dataset so the
# transform can be reproduced or inverted later.
for i in ['std', 'Uzca', 'C', 'lam', 'eps', 'U', 'V', 'mean']:
    h5f.create_dataset("ZCA_byTrainData/params_%s"%i ,data=d["Params_ZCA_byTrainData"][i])
for i in ['std', 'Uzca', 'C', 'lam', 'eps', 'U', 'V', 'mean']:
    h5f.create_dataset("ZCA_byTestData/params_%s"%i ,data=d["Params_ZCA_byTestData"][i])
h5f.flush()
h5f.close()
|
ysasaki6023/NeuralNetworkStudy
|
data_cifar100/convert_to_hdf5.py
|
Python
|
mit
| 1,514
|
from rest_framework.exceptions import APIException
from rest_framework import status
class ConflictException(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = "Resource already exists."
|
ministryofjustice/cla_backend
|
cla_backend/apps/core/drf/exceptions.py
|
Python
|
mit
| 217
|
"""setup script for linuxsecaudit (see the inline setuptools commentary)."""
from setuptools import setup, find_packages  # Always prefer setuptools over distutils
from codecs import open  # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='linuxsecaudit',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.2.0',
    description='A Linux desktop security configuration audit tool.',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/pantheon-systems/linuxsecaudit',
    # Author details
    author='David Strauss',
    author_email='david@davidstrauss.net',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='security pci compliance',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here. These will be installed by pip when your
    # project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    #install_requires=['peppercorn'],
    # List additional groups of dependencies here (e.g. development dependencies).
    # You can install these using the following syntax, for example:
    # $ pip install -e .[dev,test]
    #extras_require = {
    #    'dev': ['check-manifest'],
    #    'test': ['coverage'],
    #},
    # If there are data files included in your packages that need to be
    # installed, specify them here.
    #package_data={
    #    'sample': ['package_data.dat'],
    #},
    # Although 'package_data' is the preferred approach, in some cases you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    # Installs the systemd service and timer units that schedule the audit.
    data_files=[('/etc/systemd/system', ['systemd/linuxsecaudit.service']),
                ('/etc/systemd/system', ['systemd/linuxsecaudit.timer'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'linuxsecaudit=linuxsecaudit:main',
        ],
    },
)
|
pantheon-systems/linuxsecaudit
|
setup.py
|
Python
|
mit
| 3,175
|
#!/usr/bin/env python3
# transpiled with BefunCompile v1.3.0 (c) 2017
import gzip, base64
_g = ("AR+LCAAAAAAABADtkM0KwjAQhF+l4i1LZLZV/EGCTxJ722tOOfXh3WCNSBtQ8LhzyWSHfJkkd38V1mQ0oxnNaEYzWpsWvmXlVhDaCSDsGSLFFC/0dCkHBhIzUtCWsj/3"
+ "jnTu3VQODWU7buO9EliTd5UEJhZwAryuuNYoalTGpOlHsOjGc5NlFKubNl03Uu8OJypPALe/56W1G287ubjj0C7zgx7G7Dw7OggAAA==")
# Machine-generated code (BefunCompile): the playfield `g` is shipped as a
# gzip-in-base64 blob; the first byte of the decoded data says how many
# gzip layers to peel off.
g = base64.b64decode(_g)[1:]
for i in range(base64.b64decode(_g)[0]):
    g = gzip.decompress(g)
g=list(g)
# Bounds-checked read from the 78x27 Befunge playfield (0 outside it).
def gr(x,y):
    if(x>=0 and y>=0 and x<78 and y<27):
        return g[y*78 + x];
    return 0;
# Bounds-checked write to the playfield; silently ignores out-of-range cells.
def gw(x,y,v):
    if(x>=0 and y>=0 and x<78 and y<27):
        g[y*78 + x]=v;
# Befunge-style division/modulo: division or modulo by zero yields 0.
def td(a,b):
    return ((0)if(b==0)else(a//b))
def tm(a,b):
    return ((0)if(b==0)else(a%b))
# _0.. _8 are the transpiled basic blocks; each returns the index of the
# next block to execute (9 terminates the dispatch loop below).
def _0():
    gw(0,0,1)
    gw(1,0,1)
    return 1
def _1():
    return (3)if((gr(0,0)*(22-gr(1,0)))!=0)else(2)
def _2():
    gw(0,0,gr(0,0)+gr(1,0))
    gw(1,0,1)
    return 1
def _3():
    return (4)if(gr(0,0)>21)else(6)
def _4():
    gw(0,0,gr(0,0)-1)
    gw(1,0,gr(1,0)+1)
    return (5)if((gr(1,0)+gr(0,0))>42)else(1)
def _5():
    print(gr(21,21),end=" ",flush=True)
    return 9
def _6():
    return (8)if(((gr(0,0)-1)*(gr(1,0)-1))!=0)else(7)
def _7():
    gw(gr(0,0),gr(1,0),1)
    return 4
def _8():
    gw(gr(0,0),gr(1,0),gr(gr(0,0)-1,gr(1,0))+gr(gr(0,0),gr(1,0)-1))
    return 4
# Dispatch loop: run blocks until one returns the terminal state 9.
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8]
c=0
while c<9:
    c=m[c]()
|
Mikescher/Project-Euler_Befunge
|
compiled/Python3/Euler_Problem-015.py
|
Python
|
mit
| 1,367
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File & Directory
# 2015-08-13 10:49:14
import os
print os.name
print os.uname()
print os.environ
print os.getenv('PATH')
print os.path.abspath('.')
# To create a new directory under some directory,
# first spell out the full path of the new directory:
os.path.join('/Users/luolong', 'testdir')
'/Users/luolong/testdir'
# then create the directory:
os.mkdir('/Users/luolong/testdir')
# and remove a directory:
os.rmdir('/Users/luolong/testdir')
|
longluo/LearnPython
|
io/file_and_dir.py
|
Python
|
mit
| 492
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Send message to Slack from command line
"""
from slacker import Slacker
from slacker.utils import get_item_id_by_name
import argparse
import sys
import os
import warnings
# Silence urllib3's InsecurePlatformWarning noise on older Python/SSL stacks.
warnings.filterwarnings('ignore', message=".*InsecurePlatformWarning.*")
def post_message(token, channel, message, name, as_user, icon):
    """Post `message` to a Slack channel or user via chat.postMessage."""
    client = Slacker(token)
    client.chat.post_message(channel, message, username=name,
                             as_user=as_user, icon_emoji=icon)
def get_channel_id(token, channel_name):
    """Resolve a channel name to its Slack channel ID."""
    channel_list = Slacker(token).channels.list().body['channels']
    return get_item_id_by_name(channel_list, channel_name)
def upload_file(token, channel, file_name):
    """Upload `file_name` into the named channel (resolved to its ID)."""
    client = Slacker(token)
    target = get_channel_id(token, channel)
    client.files.upload(file_name, channels=target)
def args_priority(args, environ):
    '''
    Resolve the token, as_user flag and channel, letting command-line
    arguments take priority over the corresponding environment variables
    (SLACK_TOKEN, SLACK_AS_USER).
    '''
    # environment supplies the defaults...
    token = environ.get('SLACK_TOKEN')
    as_user = bool(environ.get('SLACK_AS_USER'))
    # ...and explicit CLI flags override them
    if args.token:
        token = args.token
    if args.as_user:
        as_user = True
    return token, as_user, args.channel
def main():
    """CLI entry point: read a message from stdin and deliver it to Slack."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--channel", help="Slack channel")
    parser.add_argument("-u", "--user", help="Slack user")
    parser.add_argument("-t", "--token", help="Slack token")
    parser.add_argument("-f", "--file", help="File to upload")
    parser.add_argument("-n", "--name", help="Sender name")
    parser.add_argument("-a", "--as-user", action="store_true", help="As user")
    parser.add_argument("-i", "--icon-emoji", help="Sender emoji icon")
    args = parser.parse_args()
    token, as_user, channel = args_priority(args, os.environ)
    message = sys.stdin.read()
    # The message goes to a channel and/or a user, whichever targets are set.
    if token and message:
        if channel:
            post_message(token, '#' + channel, message, args.name, as_user, args.icon_emoji)
        if args.user:
            post_message(token, '@' + args.user, message, args.name, as_user, args.icon_emoji)
    if token and channel and args.file:
        upload_file(token, channel, args.file)

if __name__ == '__main__':
    main()
|
beni55/slacker-cli
|
slacker_cli/__init__.py
|
Python
|
mit
| 2,691
|
'''
Created on Feb 14, 2011
@author: al
'''
from bebop import *
from unittest import TestCase
class FooDB(object):
    """Plain data holder standing in for a Foo database row.

    Accepts arbitrary keyword arguments and exposes each as an attribute.
    """
    def __init__(self, **kw):
        # .items() works on both Python 2 and 3; .iteritems() is Python-2-only.
        for attr, val in kw.items():
            setattr(self, attr, val)
class BarDB(object):
    """Plain data holder standing in for a Bar database row.

    Accepts arbitrary keyword arguments and exposes each as an attribute.
    """
    def __init__(self, **kw):
        # .items() works on both Python 2 and 3; .iteritems() is Python-2-only.
        for attr, val in kw.items():
            setattr(self, attr, val)
@SearchIndex('foo')
class Foo(SearchableModel):
    # Declarative search mapping for FooDB rows, registered under index 'foo'.
    # 'id' is the document key; 'name' is indexed as a title field.
    _target=FooDB
    id = DocumentId('id', Integer, model_attr='id')
    name = Field('name', Title, model_attr='name')
@SearchIndex('bar', config=DismaxSolrConfig)
class Bar(SearchableModel):
    # Same mapping as Foo but for BarDB rows, registered under index 'bar'
    # and configured to use the dismax Solr query parser.
    _target=BarDB
    id = DocumentId('id', Integer, model_attr='id')
    name = Field('name', Title, model_attr='name')
class TestModel(TestCase):
    # Exercises the declarative search-model layer defined above.

    def test_internals(self):
        # The @SearchIndex decorator should record the index name and
        # collect the declared fields in definition order.
        self.assertEquals(Foo.__index__, 'foo')
        self.assertEquals(Foo._fields, ['id', 'name'])
    def test_equals(self):
        # Comparing a field to a value should build a Lucene query clause.
        clause = Foo.name == 'blah'
        self.assertEquals(clause, "name:blah")
    def test_boolean_clause(self):
        # and_/or_ should compose clauses with parentheses; `> 5` becomes an
        # open-ended range query.
        clause = and_(Foo.id > 5, or_(Foo.name=='blah', Foo.name=='blee'))
        self.assertEquals(clause, "(id:[5 TO *] AND (name:blah OR name:blee))")
|
jumoconnect/openjumo
|
jumodjango/lib/bebop/test/test_model.py
|
Python
|
mit
| 1,205
|
import os
import subprocess
import json
import shlex
import math
# Ensure /usr/local/bin (where ffmpeg/ffprobe are typically installed) is on
# PATH, since Nuke's embedded Python may not inherit the shell environment.
path = '/usr/local/bin'
if path not in os.environ["PATH"]:
    os.environ["PATH"] += os.pathsep + path
def probeFile(file):
    """Run ffprobe on *file* and return its stream metadata as a dict."""
    cmd = "ffprobe -v quiet -print_format json -show_streams"
    argv = shlex.split(cmd)
    argv.append(file)
    raw = subprocess.check_output(argv).decode('utf-8')
    return json.loads(raw)
def createThumbnails(file):
    # Build a 1-row JPEG sprite sheet of up to `maxThumbs` thumbnails from a
    # video, written next to the source file. Python 2 (print statements).
    data = probeFile(file)
    maxThumbs = 30
    thumbWidth = 320
    # find duration
    duration = data['streams'][0]['duration']
    # Numerator of avg_frame_rate only — assumes an integer fps like "25/1";
    # TODO confirm for NTSC rates such as "30000/1001".
    frameRate = data['streams'][0]['avg_frame_rate'].split('/')[0]
    numFrames = int(float(duration) * float(frameRate)) + 1
    #numFrames = end - start
    # Keep every `mod`-th frame so at most maxThumbs tiles are produced.
    mod = int(math.ceil(numFrames*1.0 / maxThumbs))
    numTiles = (numFrames / mod) + 1
    print 'writing ' + str(numTiles)
    # find height and width
    height = data['streams'][0]['height']
    width = data['streams'][0]['width']
    # NOTE(review): label is missing its closing ")".
    print 'duration (seconds: ' + duration
    print 'duration (frames): ' + str(numFrames)
    print frameRate
    print 'write every ' + str(mod) + ' frames'
    dir = os.path.dirname(file)
    parts = os.path.splitext(os.path.basename(file))
    # Output name encodes the sprite sheet's total pixel width.
    outputFile = dir + '/' + parts[0] + '_sprites_' + str(numTiles*thumbWidth) + '.jpg'
    #mod = 100
    #eq(mod(50,1),1)
    #select='not(mod(n\,100))'
    # ffmpeg filter chain: keep every mod-th frame, scale to 320px wide
    # (preserving aspect), then tile all kept frames into one row.
    filtersArr = [
        "select='not(mod(n\," + str(mod) + "))'",
        "scale=320:-1",
        "tile=" + str(numTiles) + "x1"
    ]
    filters = ",".join(filtersArr)
    print filters
    # -qscale:v controls the quality of the video
    # 2 is best quality, 31 is worst
    # Fire-and-forget: Popen does not wait for ffmpeg to finish.
    subprocess.Popen([
        'ffmpeg',
        '-i', file, # inputs
        '-vf', filters, # video filters
        '-qscale:v', '4',
        '-vsync', 'vfr',
        outputFile
    ])
#pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
# Runs inside Nuke's script editor: `nuke` is injected by the host
# application, not imported here. Generate a sprite sheet for the file
# knob of every currently selected node.
sel = nuke.selectedNodes()
for n in sel:
    file = n['file'].value()
    print file
    createThumbnails(file)
    #createThumbnails(file)
|
michaelb-01/pipe
|
scripts/python/genSprites_mlb_v001.py
|
Python
|
mit
| 2,338
|
def primes():
    """Yield the prime numbers indefinitely, starting from 2.

    Uses trial division. Improvement over the original: only divisors up
    to sqrt(n) are tested (a composite number must have a factor no larger
    than its square root), turning each primality check from O(n) into
    O(sqrt(n)) without changing the yielded sequence.
    """
    n = 2
    while True:
        if all(n % div for div in range(2, int(n ** 0.5) + 1)):
            yield n
        n += 1
# Demonstration: print the first 20 primes from the generator.
gen = primes()
for n in range(20):
    print(next(gen))
|
willmcgugan/asyncchat
|
prime.py
|
Python
|
mit
| 181
|
# coding: utf-8
from __future__ import with_statement
from commands import Command
import time
import re
from socket import *
from socket import *
from struct import *
import datetime
import utility
import thread
import sys
import traceback
# Guards players_dic, which is shared between the spawning loop in
# tw_get_info and the worker threads.
list_lock = thread.allocate_lock()
def tw_get_num_players(address, port):
    # Query a single Teewars server over UDP for its player counts and
    # return (num_players, max_players). Python 2: str packets on sockets.
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.settimeout(5.0);
    # 10 bytes of 0xff padding followed by the 'gief' info-request opcode.
    sock.sendto("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xffgief", (address, port))
    data, addr = sock.recvfrom(1024)
    sock.close()
    # Skip the 14-byte reply header; the payload is NUL-separated fields.
    data = data[14:]
    slots = data.split("\x00")
    server_info = slots[0:8]
    server_name, map_name = slots[1:3]
    data = data[-2:]
    # Fields 6 and 7 hold the current and maximum player counts.
    num_players, max_players = map(int, slots[6:8])
    return num_players, max_players
def tw_get_num_players_proxy(address, port, players_dic):
    """Thread worker: query one server and record its player count.

    Writes the result into the shared *players_dic* keyed by this thread's
    id, under list_lock. Stores -1 on any failure so the spawning loop can
    distinguish 'failed' (-1) from 'still pending' (-2).
    """
    try:
        num_players, max_players = tw_get_num_players(address, port)
        with list_lock:
            players_dic[thread.get_ident()] = num_players
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps the best-effort
        # behavior (record -1) without hiding interpreter-level exits.
        with list_lock:
            players_dic[thread.get_ident()] = -1
def tw_get_info():
    # Fetch the master-server list, query every server concurrently, and
    # return (num_servers, num_players) for all reachable public servers.
    # Also appends the totals to data/tw_stats.txt and pushes them to a
    # remote stats endpoint. Returns None on any failure. Python 2 only.
    counter = 0
    address = "master.teewars.com"
    master_port = 8300
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.settimeout(5.0)
    sock.sendto("\x20\x00\x00\x00\x00\x00\xff\xff\xff\xffreqt", (address, master_port))
    try:
        data, addr = sock.recvfrom(1024)
        sock.close()
        # Skip the 14-byte header; each entry is 6 bytes: 4-byte IP then
        # a 2-byte port.
        data = data[14:]
        num_servers = len(data) / 6
        num_players = 0
        players_dic = {}
        for n in range(0, num_servers):
            ip = ".".join(map(str, map(ord, data[n*6:n*6+4])))
            port = ord(data[n*6+5]) * 256 + ord(data[n*6+4])
            #print ip, port
            with list_lock:
                # One worker thread per server; -2 marks "result pending".
                id = thread.start_new_thread(tw_get_num_players_proxy, (ip, port, players_dic))
                players_dic[id] = -2
        # Poll until no thread is still marked pending.
        while True:
            has_items = False
            with list_lock:
                for slot in players_dic.keys():
                    if players_dic[slot] == -2:
                        has_items = True
                        break
            if has_items:
                time.sleep(0.5)
            else:
                break
        # Keep only successful queries (-1 means the server didn't answer).
        players_list = []
        for slot in players_dic.keys():
            if players_dic[slot] != -1:
                players_list.append(players_dic[slot])
        num_servers = len(players_list)
        num_players = reduce(lambda x, y: x + y, players_list)
        with open("data/tw_stats.txt", "a") as file:
            file.write("%s %s %s\n" % (int(time.time()), num_servers, num_players))
        utility.read_url("http://serp.starkast.net/berserker/gief_stats.php?timestamp=%s&servers=%s&players=%s" % (int(time.time()), num_servers, num_players));
        return (num_servers, num_players)
    except:
        print 'exception O.o', sys.exc_info(), traceback.extract_tb(sys.exc_info()[2])
        return None
class TeewarsCommand(Command):
    """IRC command plugin exposing Teewars server statistics."""
    def __init__(self):
        self.next_beat = None      # next time timer_beat should refresh stats
        self.cached_info = None    # last (num_servers, num_players), or None
    def trig_twinfo(self, bot, source, target, trigger, argument):
        # Refresh and report the global server/player totals.
        self.cached_info = tw_get_info()
        if self.cached_info:
            return "There are currently %s public Teewars servers with a total of %s players." % self.cached_info
        else:
            return "I don't have any stats yet... Please wait a minute! :-)"
    def trig_teewars(self, bot, source, target, trigger, argument):
        # Query one server; argument may be "host", "host:port" or "host port".
        address = "jp.serp.se"
        port = 8303
        m = re.search("^(\S+)[: ]?(\d*)", argument)
        if m:
            address = m.group(1)
            if m.group(2):
                port = int(m.group(2))
        num_players, max_players = tw_get_num_players(address, port)
        # Bug fix: the original referenced the undefined name `num_player`,
        # so this trigger always died with a NameError.
        return "Server has %d/%d players." % (num_players, max_players)
    def timer_beat(self, bot, now):
        # Refresh the cached stats at most once every 2 minutes
        # (timedelta(0, 0, 0, 0, 2) == 2 minutes).
        if not self.next_beat or self.next_beat < now:
            self.next_beat = now + datetime.timedelta(0, 0, 0, 0, 2)
            info = tw_get_info()
            if info:
                self.cached_info = info
|
serpis/pynik
|
plugins/teewars.py
|
Python
|
mit
| 3,719
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import io
import requests
import subprocess
import sys
# Endpoints of the public Groestlcoin signet faucet.
DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.groestlcoin.org/claim'
DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.groestlcoin.org/captcha'
# Hash of block 1 on the global signet; used to detect custom signets.
GLOBAL_FIRST_BLOCK_HASH = '000000eb8848ae1ec123d4b017f1a62d315d944374b67991526442c02f865a38'
# braille unicode block
BASE = 0x2800
# Bit each pixel of a 2x4 cell contributes to a braille codepoint
# (indexed [row][column]).
BIT_PER_PIXEL = [
    [0x01, 0x08],
    [0x02, 0x10],
    [0x04, 0x20],
    [0x40, 0x80],
]
# A braille cell covers 2 pixels horizontally and 4 vertically.
BW = 2
BH = 4
# imagemagick or compatible fork (used for converting SVG)
CONVERT = 'convert'
class PPMImage:
    '''
    Load a PPM image (Pillow-ish API).

    Only binary "P6" files with a 255 maxval and the exact three-line
    header layout produced by ImageMagick are accepted.
    '''
    def __init__(self, f):
        # The P6 magic number identifies a binary RGB PPM file.
        if f.readline() != b'P6\n':
            raise ValueError('Invalid ppm format: header')
        dims = f.readline().rstrip().split(b' ')
        width, height = (int(v) for v in dims)
        if f.readline() != b'255\n':
            raise ValueError('Invalid ppm format: color depth')
        raw = f.read(width * height * 3)
        row_bytes = width * 3
        self.size = (width, height)
        # Decode the flat byte stream into rows of (r, g, b) tuples.
        self._grid = []
        for y in range(height):
            base = row_bytes * y
            row = []
            for x in range(width):
                row.append(tuple(raw[base + 3 * x:base + 3 * (x + 1)]))
            self._grid.append(row)
    def getpixel(self, pos):
        # (x, y) addressing, matching Pillow's Image.getpixel.
        x, y = pos
        return self._grid[y][x]
def print_image(img, threshold=128):
    '''Print black-and-white image to terminal in braille unicode characters.'''
    width, height = img.size
    # Ceiling division: partial cells at the right/bottom edges still count.
    x_blocks = (width + BW - 1) // BW
    y_blocks = (height + BH - 1) // BH
    for yb in range(y_blocks):
        chars = []
        for xb in range(x_blocks):
            cell = BASE
            for dy in range(BH):
                for dx in range(BW):
                    try:
                        pixel = img.getpixel((xb * BW + dx, yb * BH + dy))
                    except IndexError:
                        # Off the image edge: leave this dot unset.
                        continue
                    # Dark pixels (red channel below threshold) set a dot.
                    if pixel[0] < threshold:
                        cell |= BIT_PER_PIXEL[dy][dx]
            chars.append(chr(cell))
        print(''.join(chars))
# Command-line interface; positional args are forwarded to groestlcoin-cli.
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing groestlcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='groestlcoin-cli', help='groestlcoin-cli command to use')
parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet')
parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Groestlcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)')
parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to groestlcoin-cli (default: -signet)')
args = parser.parse_args()
if args.bitcoin_cli_args == []:
    args.bitcoin_cli_args = ['-signet']
def bitcoin_cli(rpc_command_and_params):
    """Run groestlcoin-cli with the configured global args plus
    *rpc_command_and_params* and return its stripped stdout.

    Exits the process (status 1) if the binary is missing or the call fails.
    """
    argv = [args.cmd] + args.bitcoin_cli_args + rpc_command_and_params
    try:
        output = subprocess.check_output(argv)
    except FileNotFoundError:
        print('The binary', args.cmd, 'could not be found.')
        exit(1)
    except subprocess.CalledProcessError:
        cmdline = ' '.join(argv)
        print(f'-----\nError while calling "{cmdline}" (see output above).')
        exit(1)
    return output.strip().decode()
# --- Main flow: sanity-check the chain, solve the captcha, claim coins ---
if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET:
    # Get the hash of the block at height 1 of the currently active signet chain
    curr_signet_hash = bitcoin_cli(['getblockhash', '1'])
    if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH:
        print('The global faucet cannot be used with a custom Signet network. Please use the global signet or setup your custom faucet to use this functionality.\n')
        exit(1)
else:
    # For custom faucets, don't request captcha by default.
    if args.captcha == DEFAULT_GLOBAL_CAPTCHA:
        args.captcha = ''
if args.addr == '':
    # get address for receiving coins
    args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32'])
data = {'address': args.addr, 'password': args.password, 'amount': args.amount}
# Store cookies
# for debugging: print(session.cookies.get_dict())
session = requests.Session()
if args.captcha != '': # Retrieve a captcha
    try:
        res = session.get(args.captcha)
    except:
        print('Unexpected error when contacting faucet:', sys.exc_info()[0])
        exit(1)
    # Convert SVG image to PPM, and load it
    try:
        rv = subprocess.run([args.imagemagick, '-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True)
    except FileNotFoundError:
        print('The binary', args.imagemagick, 'could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.')
        exit(1)
    img = PPMImage(io.BytesIO(rv.stdout))
    # Terminal interaction
    print_image(img)
    print('Enter captcha: ', end='')
    data['captcha'] = input()
try:
    res = session.post(args.faucet, data=data)
except:
    print('Unexpected error when contacting faucet:', sys.exc_info()[0])
    exit(1)
# Display the output as per the returned status code
if res:
    # When the return code is in between 200 and 400 i.e. successful
    print(res.text)
elif res.status_code == 404:
    print('The specified faucet URL does not exist. Please check for any server issues/typo.')
elif res.status_code == 429:
    print('The script does not allow for repeated transactions as the global faucet is rate-limitied to 1 request/IP/day. You can access the faucet website to get more coins manually')
else:
    print(f'Returned Error Code {res.status_code}\n{res.text}\n')
    print('Please check the provided arguments for their validity and/or any possible typo.')
|
GroestlCoin/bitcoin
|
contrib/signet/getcoins.py
|
Python
|
mit
| 6,315
|
import argparse
def parseircdata(data):
    """
    Parse IRC connection options from a whitespace-separated string.

    Returns an argparse namespace with host, port, nick, ident, realname
    and channel; the channel is normalised to start with '#'.
    """
    parser = argparse.ArgumentParser(description='An IRC client')
    parser.add_argument('--hs', dest='host', metavar='HOST',
                        default='irc.freenode.net',
                        help='the name of the host')
    parser.add_argument('--p', dest='port', metavar='PORT', type=int,
                        default=6667,
                        help='the port')
    parser.add_argument('--n', dest='nick', metavar='NICK',
                        default='my_nickname',
                        help='the nickname of user')
    parser.add_argument('--i', dest='ident', metavar='IDENT',
                        default='an_ident',
                        help='the ident of user')
    parser.add_argument('--r', dest='realname', metavar='REALNAME',
                        default='a_realname',
                        help='the realname of user')
    parser.add_argument('--c', dest='channel', metavar='CHANNEL',
                        default='#r',
                        help='the channel to join')
    ircdata = parser.parse_args(data.split())
    if ircdata.channel[0] != '#':
        ircdata.channel = '#' + ircdata.channel
    return ircdata
|
kultprok/circa
|
clientparser.py
|
Python
|
mit
| 1,712
|
#!/usr/bin/env python
# Packaging script for the mygmm library (setuptools).
from setuptools import setup, find_packages
# Reuse the README as the long description shown on PyPI.
with open('README.rst') as file:
    long_description = file.read()
setup(name='mygmm',
      version='1.0',
      description='GMM estimator for educational purposes',
      long_description=long_description,
      author='Stanislav Khrapov',
      license='MIT',
      author_email='khrapovs@gmail.com',
      url='https://github.com/khrapovs/mygmm',
      packages=find_packages(),
      keywords=['GMM', 'econometrics', 'estimation', 'moments'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: End Users/Desktop',
          'Intended Audience :: Financial and Insurance Industry',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
      ],
      )
|
khrapovs/mygmm
|
setup.py
|
Python
|
mit
| 957
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
    """PrivateEndpointConnectionsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.keyvault.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get(
        self,
        resource_group_name: str,
        vault_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> Optional["_models.PrivateEndpointConnection"]:
        """Gets the specified private endpoint connection associated with the key vault.
        :param resource_group_name: Name of the resource group that contains the key vault.
        :type resource_group_name: str
        :param vault_name: The name of the key vault.
        :type vault_name: str
        :param private_endpoint_connection_name: Name of the private endpoint connection associated
        with the key vault.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2019_09_01.models.PrivateEndpointConnection or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 is accepted here: it signals "no such connection" and maps to
        # a None return rather than an error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def put(
        self,
        resource_group_name: str,
        vault_name: str,
        private_endpoint_connection_name: str,
        properties: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Updates the specified private endpoint connection associated with the key vault.
        :param resource_group_name: Name of the resource group that contains the key vault.
        :type resource_group_name: str
        :param vault_name: The name of the key vault.
        :type vault_name: str
        :param private_endpoint_connection_name: Name of the private endpoint connection associated
        with the key vault.
        :type private_endpoint_connection_name: str
        :param properties: The intended state of private endpoint connection.
        :type properties: ~azure.mgmt.keyvault.v2019_09_01.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2019_09_01.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.put.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(properties, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Surface the service's retry/async-operation headers to the caller's cls hook.
        response_headers = {}
        response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
        response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        vault_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> Optional["_models.PrivateEndpointConnection"]:
        # First leg of the long-running delete: issues the DELETE request and
        # returns immediately; begin_delete polls the rest of the operation.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        response_headers = {}
        deserialized = None
        if response.status_code == 200:
            # Synchronous completion: the deleted connection is returned inline.
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if response.status_code == 202:
            # Accepted: the headers tell the poller where and how often to check.
            response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
            response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        vault_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
        """Deletes the specified private endpoint connection associated with the key vault.
        :param resource_group_name: Name of the resource group that contains the key vault.
        :type resource_group_name: str
        :param vault_name: The name of the key vault.
        :type vault_name: str
        :param private_endpoint_connection_name: Name of the private endpoint connection associated
        with the key vault.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.keyvault.v2019_09_01.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: fire the initial DELETE; cls=lambda keeps the
            # raw pipeline response for the poller.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                vault_name=vault_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final polling response into the model type.
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2019_09_01/aio/operations/_private_endpoint_connections_operations.py
|
Python
|
mit
| 16,533
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-06 14:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens Buechlein.admin_kaeufe to an optional
    # TextField (blank allowed, empty-string default).
    dependencies = [
        ('Scholien', '0004_buechlein_admin_kaeufe'),
    ]
    operations = [
        migrations.AlterField(
            model_name='buechlein',
            name='admin_kaeufe',
            field=models.TextField(blank=True, default=''),
        ),
    ]
|
valuehack/scholarium.at
|
Scholien/migrations/0005_auto_20171106_1407.py
|
Python
|
mit
| 474
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
Convert kmeans output into the Semeval system answer format.
"""
import sys
from collections import defaultdict as dd
import re
# Matches lines like "<word.pos.instance.1> 3": group 1 = instance id,
# group 2 = target word, group 3 = cluster label.
regex = re.compile("<((.*\.\w)\.\w+\.\d+)>\s+(\d+)\n")
# NOTE(review): `d` is never used below — apparently left over from an
# earlier version of this script.
d = dd(lambda: dd(lambda: dd(int)))
# Emit one Semeval answer line per kmeans output line (Python 2).
for line in sys.stdin:
    m = regex.match(line)
    inst_id, tw, label = map(m.group, [1,2,3])
    print "{0} {1} {2}-{3}/1.0".format(tw, inst_id, tw, label)
|
ai-ku/uwsd
|
run/find-sense-XYb.py
|
Python
|
mit
| 459
|
from RGT.XML.SVG.basicSvgNode import BasicSvgNode
from xml.dom.minidom import Node
class BaseScriptNode(BasicSvgNode):
    """SVG node whose textual content lives in a single CDATA child.

    Used as the base for script/style-like nodes: the payload is stored in
    exactly one CDATA section, accessed through getData/setData.
    """
    def __init__(self, ownerDoc, tagName):
        BasicSvgNode.__init__(self, ownerDoc, tagName)
        self.allowAllSvgNodesAsChildNodes = True
    def setData(self, data):
        """Store *data* in the first CDATA child, creating one if needed.

        A None value is ignored. If children exist but none is a CDATA
        section, appendChild raises (only one child, a CDATA, is allowed).
        """
        if data is None:
            return
        # Update the first existing CDATA section in place, if any.
        for child in self.childNodes:
            if child.nodeType == Node.CDATA_SECTION_NODE:
                child.data = data
                return
        # No CDATA child yet: create one. (Original duplicated this branch
        # for the empty/non-empty cases and compared `== False`.)
        self.appendChild(self.ownerDocument.createCDATASection(data))
    def getData(self):
        """Return the data of the first CDATA child, or None if absent."""
        for child in self.childNodes:
            if child.nodeType == Node.CDATA_SECTION_NODE:
                return child.data
        return None
    def appendChild(self, node):
        """Append *node*, enforcing the single-CDATA-child invariant.

        Raises Exception for non-CDATA nodes or when a child already exists.
        """
        if node.nodeType != Node.CDATA_SECTION_NODE:
            raise Exception('only CDATA nodes can be added')
        if len(self.childNodes) != 0:
            raise Exception('only one CDATA node can be present, use the getData and setData to change the data')
        BasicSvgNode.appendChild(self, node)
|
danrg/RGT-tool
|
src/RGT/XML/SVG/baseScriptNode.py
|
Python
|
mit
| 1,896
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys, re
import urllib, urllib2, json
import operator
from SPARQLWrapper import SPARQLWrapper, JSON
import nltk
from nltk.corpus import stopwords
import enchant # spell check
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
import numpy
from collections import defaultdict
from HTMLParser import HTMLParser
from random import shuffle
## constant definitions
DOCUMENT_DESCRIPTION = 0
DOCUMENT_DEFINITION = 1
DOCUMENT_NEWS = 2
DOCUMENT_URL = 3
## helper functions
def printSPARQL(results):
    """Pretty-print a SPARQLWrapper JSON result to stdout.

    SELECT results print one "var: value" line per binding variable; ASK
    results print the boolean.  Uses `in` instead of the removed dict.has_key
    and print() calls so the syntax is valid on Python 2 and 3 alike.
    (The .encode('utf8') is kept from the original; under Python 3 it yields
    a bytes repr in the output.)
    """
    if "vars" in results["head"]:
        var_list = results["head"]["vars"]
        for r in results["results"]["bindings"]:
            for v in var_list:
                print(str(v) + ": " + str(r[v]["value"].encode('utf8')))
    elif "boolean" in results:
        # answering an ASK
        print(results["boolean"])
class Document():
    """One unit of retrieved text: a title, its raw body, a DOCUMENT_* type
    code and the name of the source that produced it."""
    def __init__(self, t='', r='', docType=0, source=''):
        # Plain value object; attributes mirror the constructor arguments.
        self.title, self.raw = t, r
        self.docType, self.source = docType, source
class Source():
    """Abstract base for document providers.

    Subclasses populate self.name and override generateDocuments().
    """
    def __init__(self):
        self.documents = list()
        self.name = ''
    def __repr__(self):
        # Bug fix: the original used a Python-2 print statement referencing
        # an undefined local `name` (NameError) and returned None, which is
        # invalid for __repr__.  Return the string instead.
        return str(self.name) + " has " + str(len(self.documents)) + " documents."
    def generateDocuments(self):
        # Abstract hook; base implementation produces nothing.
        return
class SourceHTMLParser(HTMLParser):
    """HTML parser that collects the page title and the visible text of the
    <body>, skipping <script> and <style> contents.

    Bug fix: replaced the `<>` operator (removed from Python 3, long
    deprecated in Python 2) with `!=`; behavior is unchanged.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.body = []        # visible text fragments found inside <body>
        self.title = ''       # text of the <title> element, if any
        self.curr_tag = ''    # most recently opened tag (cleared on any close)
        self.in_body = False
        self.in_title = False
    def handle_starttag(self, tag, attrs):
        if tag == 'title':
            self.in_title = True
        if tag == 'body':
            self.in_body = True
        self.curr_tag = tag
    def handle_endtag(self, tag):
        if tag == 'body':
            self.in_body = False
        if tag == 'title':
            self.in_title = False
        self.curr_tag = ''
    def handle_data(self, data):
        # Keep non-blank body text unless we are inside script/style.
        if self.in_body and self.curr_tag != 'script' and self.curr_tag != 'style':
            if data.strip() != '':
                self.body.append(data)
        if self.in_title:
            self.title = data
class HTMLSource(Source):
    # basic source single html document, strips out all tags and gets text from body of HTML only
    # NOTE: Python 2 only (urllib2); generateDocuments performs network I/O.
    def __init__(self, url=''):
        """Create a source for a single URL; the URL doubles as the name."""
        Source.__init__(self)
        self.name = url
        self.url = url
        self.clear_internal_tags = True # try to automatically remove link, span and other tags that may break up continuous text
    def generateDocuments(self):
        """Fetch self.url and return ONE Document (not a list) of body text.

        Inline tags (a/b/span/i/small/strong) are stripped line by line
        before feeding the parser so continuous text is not broken up.
        """
        p = SourceHTMLParser()
        # in case it can't find a title
        p.title = self.name
        h = {
            'User-Agent': "IdylGames/0.1, (dev@idylgames.com)"
        }
        req = urllib2.Request(self.url, headers=h)
        cxn = urllib2.urlopen(req)
        for line in cxn.readlines():
            if self.clear_internal_tags:
                line = re.sub(r'<(a|b|span|i|small|strong)\s.*?>','',line)
                line = re.sub(r'</(a|b|span|i|small|strong)>','',line)
            p.feed(line)
        d = Document(p.title, '\n'.join(p.body), DOCUMENT_URL, self.url)
        return d
class DBPediaSource(Source):
    """Mines DBPedia via SPARQL for a term: its abstract, outgoing category
    links, category members and redirects, each wrapped in a Document.

    NOTE: Python 2 only (print statements, `except Exception, e`, `<>`).
    NOTE(review): *term* is interpolated into SPARQL with raw %-formatting
    and no escaping -- a quote in the term breaks (or injects into) the
    query; confirm inputs are trusted before reuse.
    """
    def __init__(self):
        Source.__init__(self)
        self.name = "DBPedia RDF"
        # Prefix header for the live DBPedia endpoint.
        self.prefixes = """
            PREFIX owl: <http://www.w3.org/2002/07/owl#>
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX foaf: <http://xmlns.com/foaf/0.1/>
            PREFIX dc: <http://purl.org/dc/elements/1.1/>
            PREFIX : <http://dbpedia.org/resource/>
            PREFIX dbpedia2: <http://dbpedia.org/property/>
            PREFIX dbpedia: <http://dbpedia.org/>
            PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
            PREFIX dbo: <http://dbpedia.org/ontology/>
        """
        # Same prefixes pointed at the OpenLink LOD mirror (fallback endpoint).
        self.prefixes_lod = """
            PREFIX owl: <http://www.w3.org/2002/07/owl#>
            PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
            PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
            PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
            PREFIX foaf: <http://xmlns.com/foaf/0.1/>
            PREFIX dc: <http://purl.org/dc/elements/1.1/>
            PREFIX : <http://lod.openlinksw.com/resource/>
            PREFIX dbpedia2: <http://lod.openlinksw.com/property/>
            PREFIX dbpedia: <http://lod.openlinksw.com/>
            PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
            PREFIX dbo: <http://lod.openlinksw.com/ontology/>
        """
        self.endpoint = "http://live.dbpedia.org/sparql"
        self.endpoint_lod = "http://lod.openlinksw.com/sparql"
        self.sparql = SPARQLWrapper(self.endpoint)
        self.sparql.setReturnFormat(JSON)
        self.sparql.setTimeout(500)
        self.links = list()        # labels of related categories, for crawling
        self.topic_urls = list()   # foaf:isPrimaryTopicOf URLs seen so far
        self.visited = list()      # terms already queried (crawl guard)
        self.category_search = True  # expensive category-member query toggle
    def generateDocuments(self, term):
        """Run up to four SPARQL queries for *term* and return [Document].

        Side effects: extends self.links (crawl frontier), self.topic_urls
        and self.visited.  Already-visited terms return an empty list.
        """
        if term in self.visited:
            return list()
        # pull down abstracts of all term concepts who belong to the category given by the term
        titles = list()
        docs = list()
        # basic query to get abstract for search term
        print "----- MAIN"
        sparql_cmd = self.prefixes + """
            SELECT ?title ?abstract ?topic
            WHERE {
                ?title rdfs:label "%s"@en .
                ?title <http://dbpedia.org/ontology/abstract> ?abstract .
                FILTER (langmatches(lang(?abstract), 'en')) .
                OPTIONAL {
                    ?title foaf:isPrimaryTopicOf ?topic .
                }
            } LIMIT 50
        """ %(term)
        self.sparql.setQuery(sparql_cmd)
        try:
            results = self.sparql.query().convert()
            # Only the first binding is used for the main abstract.
            r = results["results"]["bindings"][0]
            title = term
            abstract = r["abstract"]["value"]
            titles.append(term)
            if abstract <> '':
                d = Document(term, abstract.encode('utf8'), DOCUMENT_DESCRIPTION, 'dbpedia')
                print "Added Definitions " + str(title)
                docs.append(d)
            # KeyError here (no OPTIONAL ?topic) is swallowed by the except.
            topic_url = r["topic"]["value"]
            print "Added topic url " + str(topic_url)
            self.topic_urls.append(topic_url)
        except Exception, e:
            print "Error1: " + str(e)
            pass
        # add links
        print "----- LINKS"
        sparql_cmd = self.prefixes + """
            SELECT ?link
            WHERE {
                ?x rdfs:label "%s"@en .
                ?x <http://purl.org/dc/terms/subject> ?y .
                ?y rdfs:label ?link .
            }
        """ %(term)
        self.sparql.setQuery(sparql_cmd)
        try:
            results = self.sparql.query().convert()
            for r in results["results"]["bindings"]:
                try:
                    link = r['link']['value'].encode('utf8')
                    if link not in self.links and (link <> term):
                        self.links.append(link)
                        print "Added link " + str(link)
                except Exception, e:
                    print "Error2: " + str(e)
                    continue
        except Exception, e:
            pass
        # search for concepts or categories that match the search term
        if self.category_search:
            print "----- CATEGORIES"
            sparql_cmd = self.prefixes + """
                SELECT ?title ?abstract
                WHERE {
                    ?term_concept a skos:Concept .
                    ?term_concept rdfs:label "%s"@en .
                    ?s <http://purl.org/dc/terms/subject> ?term_concept .
                    OPTIONAL {
                        ?s dbo:abstract ?abstract .
                    }
                    ?s rdfs:label ?title .
                    FILTER(langmatches(lang(?title), 'en')) .
                    FILTER(langmatches(lang(?abstract), 'en')) .
                }
            """ %(term)
            self.sparql.setQuery(sparql_cmd)
            try:
                results = self.sparql.query().convert()
                for r in results["results"]["bindings"]:
                    try:
                        if r['title']['value'].encode('utf8') not in titles:
                            title = r['title']['value'].encode('utf8')
                            abstract = r['abstract']['value'].encode('utf8')
                            if abstract <> '':
                                titles.append(title)
                                d = Document(title, abstract , DOCUMENT_DESCRIPTION, 'dbpedia')
                                # Category members also join the crawl frontier.
                                self.links.append(title)
                                print "Added category " + str(title)
                                docs.append(d)
                    except Exception, e:
                        print "Error3: " + str(e)
                        continue
            except Exception,e:
                pass
        # pull down redirects if necessary
        print "----- REDIRECTS"
        sparql_cmd = self.prefixes + """
            SELECT ?redirectLabel ?redirectAbstract
            WHERE {
                ?title rdfs:label "%s"@en .
                ?title <http://dbpedia.org/ontology/wikiPageRedirects> ?redirectTo .
                ?redirectTo rdfs:label ?redirectLabel .
                ?redirectTo <http://dbpedia.org/ontology/abstract> ?redirectAbstract .
                FILTER (langMatches(lang(?redirectLabel), 'en')) .
                FILTER (langMatches(lang(?redirectAbstract), 'en')) .
            }
        """ %(term)
        self.sparql.setQuery(sparql_cmd)
        try:
            results = self.sparql.query().convert()
            print "Redirects: " + str(len(results["results"]["bindings"]))
            for r in results["results"]["bindings"]:
                try:
                    if r['redirectLabel']['value'].encode('utf8') not in titles:
                        title = r['redirectLabel']['value'].encode('utf8')
                        abstract = r['redirectAbstract']['value'].encode('utf8')
                        if abstract <> '':
                            titles.append(title)
                            d = Document(title, abstract , DOCUMENT_DESCRIPTION, 'dbpedia')
                            self.links.append(title)
                            print "Added document " + str(title)
                            docs.append(d)
                    #if r['redirectLabel']['value'].encode('utf8') not in self.links:
                    #    self.links.append(r['redirectLabel']['value'].encode('utf8'))
                    #    print "Added redirect link to " + str(r['redirectLabel']['value'].encode('utf8'))
                except Exception, e:
                    print "Error4: " + str(e)
                    continue
        except Exception,e:
            pass
        self.visited.append(term)
        return docs
class FeedzillaSource(Source):
    """Searches the Feedzilla article API for *term* and scrapes each hit's
    page via HTMLSource into a DOCUMENT_NEWS Document.

    NOTE: Python 2 only (urllib2, print statements, `<>`); performs network
    I/O per article.
    """
    def __init__(self):
        Source.__init__(self)
        self.name = "Feedzilla REST"
        self.article_search = "http://api.feedzilla.com/v1/articles/search.json?q="
    def generateDocuments(self, term):
        """Return a de-duplicated (by title) list of scraped news Documents."""
        h = {'User-Agent': "IdylGames/0.1, (dev@idylgames.com)"}
        url = self.article_search + term
        req = urllib2.Request(url, headers=h)
        cxn = urllib2.urlopen(req)
        result = json.loads(cxn.read())
        processed = list()
        docs = list()
        try:
            for a in result["articles"]:
                source_url = a["url"]
                d = HTMLSource(source_url)
                doc = d.generateDocuments()
                title = doc.title
                raw = doc.raw
                # Debug output of every scraped page (can be very noisy).
                print title
                print raw
                if title not in processed:
                    if raw <> '':
                        d = Document(title, raw, DOCUMENT_NEWS, 'feedzilla')
                        docs.append(d)
                        #print "Added url " + source_url + " title " + title + " and start data " + raw[0:100]
                        processed.append(title)
        except Exception,e:
            print e
            pass
        return docs
class WikipediaSource(Source):
    """Fetches article wikitext from the MediaWiki API and strips it down to
    plain text plus the list of outgoing [[wiki link]] targets.

    processWikiPage() is pure; generateDocuments() performs network I/O and
    relies on the Python-2-only urllib/urllib2 modules imported at file top.
    """
    def __init__(self):
        Source.__init__(self)
        self.wiki_en_endpoint = "http://en.wikipedia.org/w/api.php"
        self.name = "MediaWiki API"
        self.links = list()     # link targets found by the most recent parse
        self.sections = dict()
    def processWikiPage(self, data):
        """Strip wiki markup from *data*; return (plain_text, link_targets)."""
        # Drop <ref>...</ref> citations and "File:...px|" image prefixes.
        refs = re.compile(r'<ref.*?>.*?</ref>', re.DOTALL)
        data = refs.sub('', data)
        files = re.compile(r'File:.*?px\|')
        data = files.sub('', data)
        links = list()
        temp = data
        for link in re.finditer(r'\[\[', data, re.M):
            start = link.span()[1] # location of second [
            for i in range(start, len(data)-1):
                char = data[i]
                next_char = data[i+1]
                if (char + next_char == ']]'):
                    lnk_text = data[start:i].split('|')
                    links.append(lnk_text[0])
                    if len(lnk_text) > 1:
                        # Link target and display text differ: keep the text.
                        temp = temp.replace(data[start:i], lnk_text[1])
                    break
                if (char + next_char == '[['):
                    # Nested "[[" before "]]": malformed/nested link, bail out.
                    break
            data = temp
        data = data.replace("{{", "")
        # Bug fix: the original assigned the "}}"-stripped string to `temp`
        # and then discarded it, so "}}" survived in the returned text.
        data = data.replace("}}", "")
        data = data.replace("[[", "")
        data = data.replace("]]", "")
        return data, links
    def generateDocuments(self, term, section=0):
        """Fetch section *section* of article *term*; return [Document].

        Also stores the parsed link targets in self.links.  Any failure is
        logged and an empty/partial list returned.
        """
        h = {
            'User-Agent': "IdylGames/0.1, (dev@idylgames.com)"
        }
        docs = list()
        try:
            # note rvsection = 0 means we're just getting the first section
            wiki_url = self.wiki_en_endpoint + "?action=query&titles=" + urllib.quote_plus(term) + "&prop=revisions&rvprop=content&format=json&rvsection=" + str(section)
            req = urllib2.Request(wiki_url, headers=h)
            cxn = urllib2.urlopen(req)
            result = json.loads(cxn.read())
            # The single page id is the only key under 'pages' (py2: keys()
            # returns a list, hence the [0] subscript).
            current_rev_id = result['query']['pages'].keys()[0]
            data = result['query']['pages'][current_rev_id]['revisions'][0]['*']
            raw, links = self.processWikiPage(data)
            self.links = links
            title = term
            d = Document(title, raw, DOCUMENT_DEFINITION, 'wikipedia')
            docs.append(d)
        except Exception as e:
            print(e)
        return docs
class ConceptNetSource(Source):
    """Queries the ConceptNet 5.2 REST search API for edges of selected
    relations involving *term* and turns each edge into a Document.

    NOTE: Python 2 only (urllib2, print statements, `<>`); network I/O.
    """
    def __init__(self):
        Source.__init__(self)
        self.name = "ConceptNet REST"
        # Relations considered useful for word association mining.
        self.relations = ["CreatedBy", "HasContext", "HasProperty", "Causes", "AtLocation", "UsedFor", "CapableOf", "PartOf", "MemberOf", "IsA" ]
        #self.relations = [ "HasContext" ]
        self.search_endpoint = "http://conceptnet5.media.mit.edu/data/5.2/search?"
        self.assoc_endpoint = "http://conceptnet5.media.mit.edu/data/5.2/assoc?"
    def generateDocuments(self, term):
        """Run one search per relation; return de-duplicated [Document]."""
        h = {
            'User-Agent': "IdylGames/0.1, (dev@idylgames.com)"
        }
        docs = list()
        processed = list()
        try:
            for rel in self.relations:
                rel_arg = "/r/" + rel
                cn_url = self.search_endpoint + "text=" + urllib.quote_plus(term) + "&rel=" + rel_arg
                print cn_url
                req = urllib2.Request(cn_url, headers=h)
                cxn = urllib2.urlopen(req)
                result = json.loads(cxn.read())
                try:
                    for edge in result["edges"]:
                        # NOTE(review): assumes every edge carries a space-
                        # separated "startLemmas" string whose first token is
                        # the title -- confirm against the 5.2 response schema.
                        temp = edge["startLemmas"]
                        temp = temp.split(' ')
                        title = temp[0].encode('utf8')
                        raw = ' '.join(temp[1:]).encode('utf8')
                        if title not in processed:
                            if raw <> '':
                                d = Document(title, raw, DOCUMENT_DEFINITION, 'conceptnet')
                                docs.append(d)
                                processed.append(title)
                except Exception,e:
                    print e
                    pass
        except Exception,e:
            print e
        return docs
class WordList():
    """Builds a word list for a search term by mining DBPedia, Wikipedia and
    ConceptNet, POS-tagging the collected documents and sampling frequent
    nouns/verbs/adjectives weighted toward rarer words.

    Py2/py3-neutral syntax is used where it does not change Python 2
    behavior (print(), `!=`, `in`, `//`); the network layer itself still
    requires Python 2 (urllib2).
    """

    def __init__(self, search_term):
        # Bug fix: the constructor previously ignored its argument and read
        # sys.argv[1] directly, crashing whenever no CLI argument existed.
        search_term = search_term.lower()
        # Capitalise to match DBPedia/Wikipedia resource labels.
        search_term = search_term[0].upper() + search_term[1:]
        self.search_term = search_term

    def build(self):
        """Collect documents, tag them and return the sampled word list."""
        print("Searching for: " + self.search_term)
        docs = list()
        depth = 5    # max crawl steps if too few documents were found
        min_docs = 20
        max_docs = 100
        #feedzilla = FeedzillaSource()
        #print "generating feedzilla sources..."
        #docs = docs + feedzilla.generateDocuments(self.search_term)
        dbpedia = DBPediaSource()
        print("generating dbpedia sources...")
        try:
            docs = docs + dbpedia.generateDocuments(self.search_term)
        except urllib2.HTTPError:
            print("Fallback to LOD")
            # for now this overwrites but that's okay
            dbpedia.endpoint = dbpedia.endpoint_lod
            dbpedia.prefixes = dbpedia.prefixes_lod
            docs = dbpedia.generateDocuments(self.search_term)
        # DBPedia can also use:
        # skos:broader
        # note it should present disambiguates back to user
        count = 0
        titles = [ d.title for d in docs ]
        # turn off category search after the first search
        dbpedia.category_search = False
        # crawl related links until enough documents or max depth reached
        while ((count < depth) and (len(docs) < min_docs)):
            print("Following step " + str(count + 1))
            follow = dbpedia.links
            shuffle(follow)
            dbpedia.links = list()
            for link in follow:
                print("Following " + str(link))
                retVal = dbpedia.generateDocuments(link)
                for r in retVal:
                    if r.title not in titles:
                        docs.append(r)
                print("Documents: " + str(len(docs)))
                titles = [ d.title for d in docs ]
                if len(docs) > max_docs:
                    break
            count = count + 1
        print("DBPedia docs: " + str(len(docs)))
        # increase minimum documents
        min_docs = 50
        wikipedia = WikipediaSource()
        print("generating wikipedia sources...")
        try:
            # NOTE(review): this assignment DISCARDS the DBPedia documents
            # gathered above (`docs =` rather than `docs = docs +`).  It
            # looks unintentional but is preserved as-is -- confirm intent
            # before changing.
            docs = wikipedia.generateDocuments(self.search_term)
            count = 0
            titles = [ d.title for d in docs ]
            while ((count < depth) and (len(docs) < min_docs)):
                print("Following step " + str(count + 1))
                follow = wikipedia.links
                shuffle(follow)
                wikipedia.links = list()
                print(follow)
                for link in follow:
                    print("Following " + str(link.encode('utf8')))
                    retVal = wikipedia.generateDocuments(link)
                    for r in retVal:
                        if r.title not in titles:
                            docs.append(r)
                    print("Documents: " + str(len(docs)))
                    titles = [ d.title for d in docs ]
                    if len(docs) > max_docs:
                        break
                count = count + 1
        except urllib2.HTTPError:
            print("error with wikipedia, bailing on it")
        conceptnet = ConceptNetSource()
        print("generating conceptnet sources...")
        docs = docs + conceptnet.generateDocuments(self.search_term)
        print("Documents: " + str(len(docs)))
        # word -> frequency maps per part of speech
        nouns = dict()
        verbs = dict()
        adjs = dict()
        titles = list()
        entities = list()
        lemm = WordNetLemmatizer()
        for d in docs:
            try:
                # Named entities from both the title and the body.
                ne_chunk = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(d.title)), True)
                for n in ne_chunk.subtrees():
                    if n.label() == 'NE':
                        e = ' '.join([ x[0] for x in n.leaves() ])
                        entities.append(e)
                ne_chunk = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(d.raw)), True)
                for n in ne_chunk.subtrees():
                    if n.label() == 'NE':
                        e = ' '.join([ x[0] for x in n.leaves() ])
                        entities.append(e)
                title_pos = d.title.encode("utf8").lower()
                raw_pos = d.raw.encode("utf8").lower()
                titles.append(title_pos)
                title_pos = nltk.pos_tag(nltk.word_tokenize(title_pos))
                raw_pos = nltk.pos_tag(nltk.word_tokenize(raw_pos))
                for w, t in title_pos:
                    if w in stopwords.words('english'):
                        pass
                    elif t.startswith('N'):
                        # NOTE(review): the lemma is computed but the raw
                        # surface form `w` is counted (unlike verbs) --
                        # preserved as-is; confirm whether intentional.
                        lemm_noun = lemm.lemmatize(w, 'n')
                        if w not in nouns:
                            nouns[w] = 0
                        nouns[w] += 1
                    elif t.startswith('V'):
                        inf_verb = lemm.lemmatize(w, 'v')
                        if inf_verb not in verbs:
                            verbs[inf_verb] = 0
                        verbs[inf_verb] += 1
                    elif t.startswith('J'):
                        if w not in adjs:
                            adjs[w] = 0
                        adjs[w] += 1
                for w, t in raw_pos:
                    if w in stopwords.words('english'):
                        pass
                    elif t.startswith('N'):
                        if w not in nouns:
                            nouns[w] = 0
                        nouns[w] += 1
                    elif t.startswith('V'):
                        inf_verb = lemm.lemmatize(w, 'v')
                        if inf_verb not in verbs:
                            verbs[inf_verb] = 0
                        verbs[inf_verb] += 1
                    elif t.startswith('J'):
                        if w not in adjs:
                            adjs[w] = 0
                        adjs[w] += 1
            except Exception as e:
                print(e)
                pass
        # do a frequency count first
        sorted_nouns = sorted(nouns.items(), key=operator.itemgetter(1))
        sorted_verbs = sorted(verbs.items(), key=operator.itemgetter(1))
        sorted_adjs = sorted(adjs.items(), key=operator.itemgetter(1))
        final_words = list()
        def prob_sort_words(word_list):
            # Drop one-off words, then weight inversely to log-frequency so
            # rarer (more distinctive) words are sampled more often.
            prob_sorted = [ (w, 1./(numpy.log(1+f))) for w, f in word_list if f != 1 ]
            total = sum([ x[1] for x in prob_sorted ])
            prob_sorted = [ (w, float(inv_f/total)) for w, inv_f in prob_sorted ]
            return prob_sorted
        # `//` is explicit floor division -- identical to py2 int `/` here.
        n = prob_sort_words(sorted_nouns)
        final_words = final_words + list(numpy.random.choice([ x[0] for x in n ], size=(len(n)//3), p=[ x[1] for x in n ], replace=False))
        v = prob_sort_words(sorted_verbs)
        final_words = final_words + list(numpy.random.choice([ x[0] for x in v ], size=(len(v)//3), p=[ x[1] for x in v ], replace=False))
        j = prob_sort_words(sorted_adjs)
        final_words = final_words + list(numpy.random.choice([ x[0] for x in j ], size=(len(j)//4), p=[ x[1] for x in j ], replace=False))
        entities = list(set(entities))
        if len(titles) > 0:
            final_words = final_words + list(numpy.random.choice(titles, size=(len(titles)//4)))
        if len(entities) > 0:
            final_words = final_words + list(numpy.random.choice(entities, size=(len(entities)//6)))
        # drop all multi-word entries: better later to split them
        final_words = [ f.lower() for f in final_words if len(nltk.word_tokenize(f)) == 1 ]
        # Read the ANC word frequency distribution (word \t ... \t count).
        with open('anc-freq-lemma.txt', 'r') as fin:
            raw_freq = fin.readlines()
        frequencies = defaultdict(float)
        total = 0
        for line in raw_freq:
            tokens = line.split('\t')
            if (len(tokens) == 4):
                w = tokens[0]
                frequencies[w] += float(tokens[3])
                total += float(tokens[3])
        for k, v in frequencies.items():
            frequencies[k] /= total
        # drop words with zero frequency
        final_filter = [ (w, frequencies[w]) for w in list(set(final_words)) if frequencies[w] != 0 ]
        final_filter = prob_sort_words(final_filter)
        final_words = list(numpy.random.choice([ x[0] for x in final_filter ], size=len(final_filter), p=[ x[1] for x in final_filter ], replace=False))
        # now remove words we know we don't want in there but might get in:
        final_words = list(set(final_words))
        remove_words = [ 'wikipedia' ]
        final_words = [ w for w in final_words if w not in remove_words ]
        # Remove duplicate stems: keep the first word seen for each stem.
        st = LancasterStemmer()
        stemmed_words = list()
        temp = list()
        for w in final_words:
            stem = st.stem(w)
            if stem not in stemmed_words:
                stemmed_words.append(stem)
                temp.append(w)
        final_words = temp
        return final_words
if __name__ == "__main__":
    # Bug fix: sys.argv always contains at least the script name, so the old
    # check `len(sys.argv) < 1` could never trigger; require a second element
    # (the search term).
    if len(sys.argv) < 2:
        print("Usage: wordSource.py <search_term>")
        sys.exit()
    # Normalise to "Titlecase" to match DBPedia/Wikipedia resource labels.
    search_term = sys.argv[1].lower()
    search_term = search_term[0].upper() + search_term[1:]
    test = WordList(search_term)
    print(test.build())
# word source algorithm
# use seed word to get terms from different sources:
# - dbpedia
# - ConceptNet:
# - parse return nodes by /c/en/<title>/n/<raw>
# - rels:
# - /r/CreatedBy (B created by A)
# - /r/HasContext (B has context A)
# - /r/HasProperty (A has B as a property)
# - /r/Causes (A and B are events and it's typical for A to cause B)
# - /r/AtLocation (A is the typical or inherent location of B)
# - /r/UsedFor (A is used for B; the purpose of A is B)
# - /r/CapableOf (Something that A can do is typically B)
# - /r/PartOf (A is part of B)
# - /r/MemberOf (A is member of B, B is a group that contains A)
# - /r/IsA (A is a subtype or specific instance of B)
# - also use http://conceptnet5.media.mit.edu/data/5.2/assoc/list/en/toast,cereal,juice,egg to build lists of terms that might be relevant
#
# Note that later on we can use conceptnet for definitions and for generating hints!
#
# Should expand source of documents to include:
# - wikimedia API full page of content for a single word if it exists
# - news APIs
|
helshowk/autocross
|
wordSource.py
|
Python
|
mit
| 28,445
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test runner: delegates test discovery to nose2, failing fast with an
# actionable hint when the dependency is missing.
try:
    import nose2
except ImportError:
    _hint = """nose2 package not installed, install test requirements with:
    pip install -r requirements-test.txt
    """
    raise ImportError(_hint)

if __name__ == "__main__":
    nose2.discover()
|
ninuxorg/netdiff
|
runtests.py
|
Python
|
mit
| 297
|
# -*- coding: utf-8 -*-
import logging
import threading
import multiprocessing
import db
logging.basicConfig(level=logging.INFO)
class DataReader:
    """Owns the background DB worker thread and the three table caches
    (drugs_data, drugs_types, drugs_count).

    All DB traffic flows through two queues: SQL tasks in, row lists out.
    NOTE(review): a threading.Thread is paired with multiprocessing.Queue
    objects -- functional, but plain queue.Queue would suffice; confirm
    whether a separate process was originally intended.
    """
    def __init__(self):
        logging.info(u'create DataReader object')
        self.queue_db_tasks = multiprocessing.Queue()
        self.queue_db_results = multiprocessing.Queue()
        logging.info(u'create db thread')
        # db.db_thread consumes tasks and publishes results until stopped.
        self.thread_db = threading.Thread(target=db.db_thread, args=(self.queue_db_tasks, self.queue_db_results))
        self.thread_db.start()
        logging.info(u'load db data')
        # Each cache performs its own blocking initial SELECT via the queues.
        self.drugs_data = DrugsDataArray(self)
        self.drugs_types = DrugsTypes(self)
        self.drugs_count = DrugsCount(self)
        logging.info(u'DataReader - init finish')
    def process(self, obj):
        # Placeholder hook; no processing implemented yet.
        pass
class DrugsDataArray:
    """Dict-like, write-through cache of the drugs_data table keyed by id.

    NOTE(review): __init__ stores DrugsData objects, but __setitem__ indexes
    `value` positionally (value[0..3]) -- the two representations disagree;
    confirm what callers actually assign.  SQL is built with raw
    %-interpolation and no quoting/escaping, so values must be trusted.
    """
    def __init__(self, data_reader):
        self.data_reader = data_reader
        self.array = dict()
        # Synchronous initial load: enqueue the read, block on the result.
        self.data_reader.queue_db_tasks.put((u'read', u'SELECT * FROM drugs_data;'))
        for _id, name, cost, _type in self.data_reader.queue_db_results.get():
            self.array[_id] = DrugsData(_id, name, cost, _type)
    def __getitem__(self, item):
        return self.array[item]
    def __setitem__(self, key, value):
        # Write-through: mutate the cache, then enqueue the UPDATE.
        self.array[key] = value
        self.data_reader.queue_db_tasks.put((u'write',
                                             u'UPDATE drugs_data SET name=%s, cost=%s, type=%s WHERE id=%s' %
                                             (value[1], value[2], value[3], value[0])))
    def __delitem__(self, key):
        del self.array[key]
        self.data_reader.queue_db_tasks.put((u'write', u'DELETE FROM drugs_data WHERE id=%s' % key))
class DrugsData:
    # Plain record for one drugs_data row (id, name, cost, resolved type).
    # NOTE(review): `DrugsTypes[_type]` subscripts the DrugsTypes *class*
    # object, which raises at runtime; presumably the type name was meant to
    # be looked up on a DrugsTypes *instance* (e.g. data_reader.drugs_types).
    # Confirm before relying on this constructor.
    def __init__(self, _id, name, cost, _type):
        self.id, self.name, self.cost, self.type = _id, name, cost, DrugsTypes[_type]
class DrugsTypes:
    """Dict-like, write-through mirror of the drugs_types table
    (id -> type name), backed by the shared DB worker queues."""

    def __init__(self, data_reader):
        self.data_reader = data_reader
        self.array = dict()
        # Synchronous initial load: enqueue the SELECT, block on the result.
        self.data_reader.queue_db_tasks.put((u'read', u'SELECT * FROM drugs_types;'))
        rows = self.data_reader.queue_db_results.get()
        for type_id, type_name in rows:
            self.array[type_id] = type_name

    def __getitem__(self, item):
        return self.array[item]

    def __setitem__(self, key, value):
        # Mutate the cache first, then enqueue the write-back.
        self.array[key] = value
        statement = u'UPDATE drugs_types SET name=%s WHERE id=%s' % (value, key)
        self.data_reader.queue_db_tasks.put((u'write', statement))

    def __delitem__(self, key):
        del self.array[key]
        statement = u'DELETE FROM drugs_types WHERE id=%s' % key
        self.data_reader.queue_db_tasks.put((u'write', statement))
class DrugsCount:
    """Dict-like, write-through view of the drugs_count table
    (id -> [available, sold_week])."""
    def __init__(self, data_reader):
        self.data_reader = data_reader
        self.array = dict()
        self.data_reader.queue_db_tasks.put((u'read', u'SELECT * FROM drugs_count'))
        for _id, available, sold_week in self.data_reader.queue_db_results.get():
            # Routed through __setitem__, so loading also enqueues one UPDATE
            # write-back per row (behavior preserved from the original code).
            self[_id] = [available, sold_week]
    def __getitem__(self, item):
        return self.array[item]
    def __setitem__(self, key, value):
        self.array[key] = value
        # Bug fix: the two adjacent string literals previously concatenated
        # to "...sold_week=%sWHERE id=%s" (missing space), producing invalid
        # SQL.  A space is now included before WHERE.
        self.data_reader.queue_db_tasks.put((u'write', u'UPDATE drugs_count SET available=%s, sold_week=%s '
                                                       u'WHERE id=%s' % (value[0], value[1], key)))
    def __delitem__(self, key):
        del self.array[key]
        self.data_reader.queue_db_tasks.put((u'write', u'DELETE FROM drugs_count WHERE id=%s' % key))
|
gromoleg/pharmacy
|
libs/data/reader.py
|
Python
|
mit
| 3,425
|
'''
Created on 7 Feb 2015
@author: robrant
'''
# MongoDB connection settings for the dfkm application.
MONGO_DBNAME = 'dfkm'
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
# Enables test mode in the consuming application.
TESTING = True
# Scratch note kept from development (sample coordinate and radius); the
# string below is a no-op expression, not configuration.
"""
lat = 50.722
lon = -1.866
50m
"""
|
robrant/dfkm
|
config.py
|
Python
|
mit
| 172
|
from distutils.core import setup

# Minimal distutils packaging metadata for the pyTAP module.
setup(
    name="pyTAP",
    version="0.001",
    py_modules=["TAP.Simple"],
)
|
rjbs/PyTAP
|
setup.py
|
Python
|
mit
| 119
|